code (stringlengths 13 to 6.09M) | order_type (stringclasses, 2 values) | original_example (dict) | step_ids (listlengths 1 to 5) |
---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def import_string(path):
"""
根据字符串的形式去导入路径中的对象
:param path: 'src.engine.agent.AgentHandler'
:return:
"""
module_path, cls_name = path.rsplit('.', maxsplit=1)
module = importlib.import_module(module_path)
return getattr(module, cls_name)
<|reserved_special_token_1|>
import importlib
def import_string(path):
"""
根据字符串的形式去导入路径中的对象
:param path: 'src.engine.agent.AgentHandler'
:return:
"""
module_path, cls_name = path.rsplit('.', maxsplit=1)
module = importlib.import_module(module_path)
return getattr(module, cls_name)
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding:utf-8 -*-
import importlib
def import_string(path):
"""
根据字符串的形式去导入路径中的对象
:param path: 'src.engine.agent.AgentHandler'
:return:
"""
module_path,cls_name = path.rsplit('.',maxsplit=1)
module = importlib.import_module(module_path)
return getattr(module,cls_name)
|
flexible
|
{
"blob_id": "8502ebdb13c68a9a56a1a4ba51370d8458ca81dc",
"index": 7944,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef import_string(path):\n \"\"\"\n 根据字符串的形式去导入路径中的对象\n :param path: 'src.engine.agent.AgentHandler'\n :return:\n \"\"\"\n module_path, cls_name = path.rsplit('.', maxsplit=1)\n module = importlib.import_module(module_path)\n return getattr(module, cls_name)\n",
"step-3": "import importlib\n\n\ndef import_string(path):\n \"\"\"\n 根据字符串的形式去导入路径中的对象\n :param path: 'src.engine.agent.AgentHandler'\n :return:\n \"\"\"\n module_path, cls_name = path.rsplit('.', maxsplit=1)\n module = importlib.import_module(module_path)\n return getattr(module, cls_name)\n",
"step-4": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\nimport importlib\n\ndef import_string(path):\n \"\"\"\n 根据字符串的形式去导入路径中的对象\n :param path: 'src.engine.agent.AgentHandler'\n :return:\n \"\"\"\n\n module_path,cls_name = path.rsplit('.',maxsplit=1)\n module = importlib.import_module(module_path)\n return getattr(module,cls_name)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
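For reference, the record above defines an `import_string` helper that resolves a dotted path to an attribute at runtime. A minimal usage sketch, assuming any importable dotted path (the example path 'collections.OrderedDict' is chosen only for illustration):

import importlib

def import_string(path):
    # Split 'package.module.Attr' into a module path and an attribute name, then resolve it.
    module_path, cls_name = path.rsplit('.', maxsplit=1)
    module = importlib.import_module(module_path)
    return getattr(module, cls_name)

cls = import_string('collections.OrderedDict')
print(cls)  # <class 'collections.OrderedDict'>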
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@admin.register(Predictions)
class PredictionsAdmin(admin.ModelAdmin):
pass
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Predictions
@admin.register(Predictions)
class PredictionsAdmin(admin.ModelAdmin):
pass
|
flexible
|
{
"blob_id": "bab78e8a88f9a26cc13fe0c301f82880cee2b680",
"index": 965,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected](Predictions)\nclass PredictionsAdmin(admin.ModelAdmin):\n pass\n",
"step-3": "from django.contrib import admin\nfrom .models import Predictions\n\n\[email protected](Predictions)\nclass PredictionsAdmin(admin.ModelAdmin):\n pass\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
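A brief note on the record above: the `@admin.register(Predictions)` decorator is Django's shorthand for explicit registration. A minimal equivalent sketch, assuming the same `Predictions` model from `.models`:

from django.contrib import admin
from .models import Predictions

class PredictionsAdmin(admin.ModelAdmin):
    pass

# Same effect as decorating PredictionsAdmin with @admin.register(Predictions)
admin.site.register(Predictions, PredictionsAdmin)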
# coding: utf-8
# In[1]:
import pandas as pd
import os,re,sys
import numpy as np
import glob as glob
# In[2]:
def createNewDataFrame():
columns = ['document_id','content','cat','subcat']
df_ = pd.DataFrame(columns=columns)
return(df_)
# In[3]:
def getcategories(foldername):
cats = foldername.split('_')
print("The cats are ", cats,len(cats))
cat =''
sub = ''
if (len(cats) == 1):
cat = cats[0]
sub = ''
if (len(cats) == 2):
cat = cats[0]
sub = cats[1]
if(len(cats) == 3):
cat = cats[0]+'/'+cats[1]
sub = cats[2]
if(len(cats) == 4):
cat = cats[0]+'/'+cats[1]
sub = cats[2]+'/'+cats[3]
return(cat,sub)
# In[4]:
global df
df = createNewDataFrame()
clientFolder='/home/medilenz/OCR_Process/Firm_logic_july_03/'
paths = glob.glob(clientFolder+'*/')
for item in paths:
pdffolders = glob.glob(item+'/*.pdf_work')
#print("THe item is ", item)
cat,subcat = getcategories(item.split('/')[-2])
for eachpdffolder in pdffolders:
doc_id=eachpdffolder.split('/')[-1].split('.')[0]
textfile = glob.glob(eachpdffolder+'page_*[^_6].txt')
if(len(textfile) < 2):
with open(eachpdffolder+'/page_0001.txt', 'r') as myfile0:
content = myfile0.read()
else :
with open(eachpdffolder+'/page_0001.txt', 'r') as myfile:
content = myfile.read()
with open(eachpdffolder+'/page_0002.txt', 'r') as myfile2:
content = content + myfile2.read()
df = df.append([{'document_id':doc_id, 'content':content,'cat':cat, 'subcat': subcat}],ignore_index=True)
df.to_csv("../corpus/Full_corpus_fromClientFolder.csv")
|
normal
|
{
"blob_id": "1aa01845ab98005b1fee33b4fc153bb029e450e0",
"index": 2061,
"step-1": "<mask token>\n\n\ndef createNewDataFrame():\n columns = ['document_id', 'content', 'cat', 'subcat']\n df_ = pd.DataFrame(columns=columns)\n return df_\n\n\ndef getcategories(foldername):\n cats = foldername.split('_')\n print('The cats are ', cats, len(cats))\n cat = ''\n sub = ''\n if len(cats) == 1:\n cat = cats[0]\n sub = ''\n if len(cats) == 2:\n cat = cats[0]\n sub = cats[1]\n if len(cats) == 3:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2]\n if len(cats) == 4:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2] + '/' + cats[3]\n return cat, sub\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef createNewDataFrame():\n columns = ['document_id', 'content', 'cat', 'subcat']\n df_ = pd.DataFrame(columns=columns)\n return df_\n\n\ndef getcategories(foldername):\n cats = foldername.split('_')\n print('The cats are ', cats, len(cats))\n cat = ''\n sub = ''\n if len(cats) == 1:\n cat = cats[0]\n sub = ''\n if len(cats) == 2:\n cat = cats[0]\n sub = cats[1]\n if len(cats) == 3:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2]\n if len(cats) == 4:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2] + '/' + cats[3]\n return cat, sub\n\n\nglobal df\n<mask token>\nfor item in paths:\n pdffolders = glob.glob(item + '/*.pdf_work')\n cat, subcat = getcategories(item.split('/')[-2])\n for eachpdffolder in pdffolders:\n doc_id = eachpdffolder.split('/')[-1].split('.')[0]\n textfile = glob.glob(eachpdffolder + 'page_*[^_6].txt')\n if len(textfile) < 2:\n with open(eachpdffolder + '/page_0001.txt', 'r') as myfile0:\n content = myfile0.read()\n else:\n with open(eachpdffolder + '/page_0001.txt', 'r') as myfile:\n content = myfile.read()\n with open(eachpdffolder + '/page_0002.txt', 'r') as myfile2:\n content = content + myfile2.read()\n df = df.append([{'document_id': doc_id, 'content': content, 'cat':\n cat, 'subcat': subcat}], ignore_index=True)\ndf.to_csv('../corpus/Full_corpus_fromClientFolder.csv')\n",
"step-3": "<mask token>\n\n\ndef createNewDataFrame():\n columns = ['document_id', 'content', 'cat', 'subcat']\n df_ = pd.DataFrame(columns=columns)\n return df_\n\n\ndef getcategories(foldername):\n cats = foldername.split('_')\n print('The cats are ', cats, len(cats))\n cat = ''\n sub = ''\n if len(cats) == 1:\n cat = cats[0]\n sub = ''\n if len(cats) == 2:\n cat = cats[0]\n sub = cats[1]\n if len(cats) == 3:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2]\n if len(cats) == 4:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2] + '/' + cats[3]\n return cat, sub\n\n\nglobal df\ndf = createNewDataFrame()\nclientFolder = '/home/medilenz/OCR_Process/Firm_logic_july_03/'\npaths = glob.glob(clientFolder + '*/')\nfor item in paths:\n pdffolders = glob.glob(item + '/*.pdf_work')\n cat, subcat = getcategories(item.split('/')[-2])\n for eachpdffolder in pdffolders:\n doc_id = eachpdffolder.split('/')[-1].split('.')[0]\n textfile = glob.glob(eachpdffolder + 'page_*[^_6].txt')\n if len(textfile) < 2:\n with open(eachpdffolder + '/page_0001.txt', 'r') as myfile0:\n content = myfile0.read()\n else:\n with open(eachpdffolder + '/page_0001.txt', 'r') as myfile:\n content = myfile.read()\n with open(eachpdffolder + '/page_0002.txt', 'r') as myfile2:\n content = content + myfile2.read()\n df = df.append([{'document_id': doc_id, 'content': content, 'cat':\n cat, 'subcat': subcat}], ignore_index=True)\ndf.to_csv('../corpus/Full_corpus_fromClientFolder.csv')\n",
"step-4": "import pandas as pd\nimport os, re, sys\nimport numpy as np\nimport glob as glob\n\n\ndef createNewDataFrame():\n columns = ['document_id', 'content', 'cat', 'subcat']\n df_ = pd.DataFrame(columns=columns)\n return df_\n\n\ndef getcategories(foldername):\n cats = foldername.split('_')\n print('The cats are ', cats, len(cats))\n cat = ''\n sub = ''\n if len(cats) == 1:\n cat = cats[0]\n sub = ''\n if len(cats) == 2:\n cat = cats[0]\n sub = cats[1]\n if len(cats) == 3:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2]\n if len(cats) == 4:\n cat = cats[0] + '/' + cats[1]\n sub = cats[2] + '/' + cats[3]\n return cat, sub\n\n\nglobal df\ndf = createNewDataFrame()\nclientFolder = '/home/medilenz/OCR_Process/Firm_logic_july_03/'\npaths = glob.glob(clientFolder + '*/')\nfor item in paths:\n pdffolders = glob.glob(item + '/*.pdf_work')\n cat, subcat = getcategories(item.split('/')[-2])\n for eachpdffolder in pdffolders:\n doc_id = eachpdffolder.split('/')[-1].split('.')[0]\n textfile = glob.glob(eachpdffolder + 'page_*[^_6].txt')\n if len(textfile) < 2:\n with open(eachpdffolder + '/page_0001.txt', 'r') as myfile0:\n content = myfile0.read()\n else:\n with open(eachpdffolder + '/page_0001.txt', 'r') as myfile:\n content = myfile.read()\n with open(eachpdffolder + '/page_0002.txt', 'r') as myfile2:\n content = content + myfile2.read()\n df = df.append([{'document_id': doc_id, 'content': content, 'cat':\n cat, 'subcat': subcat}], ignore_index=True)\ndf.to_csv('../corpus/Full_corpus_fromClientFolder.csv')\n",
"step-5": "\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport os,re,sys\nimport numpy as np\nimport glob as glob\n\n\n# In[2]:\n\ndef createNewDataFrame():\n \n columns = ['document_id','content','cat','subcat']\n df_ = pd.DataFrame(columns=columns)\n return(df_)\n\n\n# In[3]:\n\ndef getcategories(foldername):\n cats = foldername.split('_')\n print(\"The cats are \", cats,len(cats))\n cat =''\n sub = '' \n if (len(cats) == 1):\n cat = cats[0]\n sub = ''\n if (len(cats) == 2):\n cat = cats[0]\n sub = cats[1]\n if(len(cats) == 3):\n cat = cats[0]+'/'+cats[1]\n sub = cats[2]\n if(len(cats) == 4):\n cat = cats[0]+'/'+cats[1]\n sub = cats[2]+'/'+cats[3]\n \n return(cat,sub) \n\n\n# In[4]:\n\nglobal df\ndf = createNewDataFrame()\n\nclientFolder='/home/medilenz/OCR_Process/Firm_logic_july_03/'\n\npaths = glob.glob(clientFolder+'*/')\nfor item in paths:\n pdffolders = glob.glob(item+'/*.pdf_work') \n #print(\"THe item is \", item)\n cat,subcat = getcategories(item.split('/')[-2])\n for eachpdffolder in pdffolders:\n doc_id=eachpdffolder.split('/')[-1].split('.')[0] \n textfile = glob.glob(eachpdffolder+'page_*[^_6].txt') \n if(len(textfile) < 2):\n with open(eachpdffolder+'/page_0001.txt', 'r') as myfile0:\n content = myfile0.read() \n else :\n with open(eachpdffolder+'/page_0001.txt', 'r') as myfile:\n content = myfile.read()\n with open(eachpdffolder+'/page_0002.txt', 'r') as myfile2:\n content = content + myfile2.read()\n \n df = df.append([{'document_id':doc_id, 'content':content,'cat':cat, 'subcat': subcat}],ignore_index=True) \n\n\ndf.to_csv(\"../corpus/Full_corpus_fromClientFolder.csv\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
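One caveat on the corpus-building record above: it relies on `DataFrame.append`, which was deprecated in pandas 1.4 and removed in pandas 2.0. A minimal sketch of the same row-accumulation step using `pd.concat` instead (column names taken from the record, row values invented):

import pandas as pd

columns = ['document_id', 'content', 'cat', 'subcat']
df = pd.DataFrame(columns=columns)
row = {'document_id': 'doc_0001', 'content': 'example text', 'cat': 'catA', 'subcat': 'subB'}
# pd.concat with a one-row DataFrame replaces the removed df.append(...) call
df = pd.concat([df, pd.DataFrame([row])], ignore_index=True)
print(df)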
<|reserved_special_token_0|>
def select():
result = tkinter.colorchooser.askcolor(title='内裤颜色种类', initialcolor=
'purple')
print(result)
btn1['bg'] = result[1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root.minsize(300, 300)
def select():
result = tkinter.colorchooser.askcolor(title='内裤颜色种类', initialcolor=
'purple')
print(result)
btn1['bg'] = result[1]
<|reserved_special_token_0|>
btn1.pack()
root.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root = tkinter.Tk()
root.minsize(300, 300)
def select():
result = tkinter.colorchooser.askcolor(title='内裤颜色种类', initialcolor=
'purple')
print(result)
btn1['bg'] = result[1]
btn1 = tkinter.Button(root, text='请选择你的内裤颜色', command=select)
btn1.pack()
root.mainloop()
<|reserved_special_token_1|>
import tkinter
import tkinter.colorchooser
root = tkinter.Tk()
root.minsize(300, 300)
def select():
result = tkinter.colorchooser.askcolor(title='内裤颜色种类', initialcolor=
'purple')
print(result)
btn1['bg'] = result[1]
btn1 = tkinter.Button(root, text='请选择你的内裤颜色', command=select)
btn1.pack()
root.mainloop()
<|reserved_special_token_1|>
#颜色选择对话框
import tkinter
import tkinter.colorchooser
root = tkinter.Tk()
root.minsize(300,300)
#添加颜色选择按钮
def select():
#打开颜色选择器
result = tkinter.colorchooser.askcolor(title = '内裤颜色种类',initialcolor = 'purple')
print(result)
#改变按钮颜色
btn1['bg'] = result[1]
btn1 = tkinter.Button(root,text = '请选择你的内裤颜色',command = select)
btn1.pack()
root.mainloop()
|
flexible
|
{
"blob_id": "dc261b29c1c11bb8449ff20a7f2fd120bef9efca",
"index": 6090,
"step-1": "<mask token>\n\n\ndef select():\n result = tkinter.colorchooser.askcolor(title='内裤颜色种类', initialcolor=\n 'purple')\n print(result)\n btn1['bg'] = result[1]\n\n\n<mask token>\n",
"step-2": "<mask token>\nroot.minsize(300, 300)\n\n\ndef select():\n result = tkinter.colorchooser.askcolor(title='内裤颜色种类', initialcolor=\n 'purple')\n print(result)\n btn1['bg'] = result[1]\n\n\n<mask token>\nbtn1.pack()\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = tkinter.Tk()\nroot.minsize(300, 300)\n\n\ndef select():\n result = tkinter.colorchooser.askcolor(title='内裤颜色种类', initialcolor=\n 'purple')\n print(result)\n btn1['bg'] = result[1]\n\n\nbtn1 = tkinter.Button(root, text='请选择你的内裤颜色', command=select)\nbtn1.pack()\nroot.mainloop()\n",
"step-4": "import tkinter\nimport tkinter.colorchooser\nroot = tkinter.Tk()\nroot.minsize(300, 300)\n\n\ndef select():\n result = tkinter.colorchooser.askcolor(title='内裤颜色种类', initialcolor=\n 'purple')\n print(result)\n btn1['bg'] = result[1]\n\n\nbtn1 = tkinter.Button(root, text='请选择你的内裤颜色', command=select)\nbtn1.pack()\nroot.mainloop()\n",
"step-5": "#颜色选择对话框\nimport tkinter\nimport tkinter.colorchooser\n\nroot = tkinter.Tk()\nroot.minsize(300,300)\n\n#添加颜色选择按钮\ndef select():\n #打开颜色选择器\n result = tkinter.colorchooser.askcolor(title = '内裤颜色种类',initialcolor = 'purple')\n print(result)\n #改变按钮颜色\n btn1['bg'] = result[1]\n\nbtn1 = tkinter.Button(root,text = '请选择你的内裤颜色',command = select)\nbtn1.pack()\n\n\n\n\nroot.mainloop()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
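For reference on the color-chooser record above: `tkinter.colorchooser.askcolor` returns a pair of the form `((r, g, b), '#rrggbb')`, which is why the button background is set from `result[1]`; if the dialog is cancelled it returns `(None, None)`. A minimal sketch that guards against the cancelled case (button label translated to English for illustration):

import tkinter
import tkinter.colorchooser

root = tkinter.Tk()

def select():
    result = tkinter.colorchooser.askcolor(initialcolor='purple')
    if result[1] is not None:   # (None, None) means the dialog was cancelled
        btn['bg'] = result[1]   # result[1] is the '#rrggbb' hex string

btn = tkinter.Button(root, text='Pick a color', command=select)
btn.pack()
root.mainloop()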
<|reserved_special_token_0|>
def errorbar(t, f, s, fp=None, **kwargs):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10, 3))
ax.errorbar(t, f, s, marker='o', color='b', linestyle='none', **kwargs)
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=
'Normalized flux')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
<|reserved_special_token_0|>
def centroids(t, x, y, fp=None):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10, 3))
ax.plot(t, x, label='x', color='b')
ax.plot(t, y, label='y', color='r')
ax.legend()
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=
'Centroid')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def simple_ts(t, f, fp=None, **kwargs):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10, 3))
ax.plot(t, f, 'bo', **kwargs)
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=
'Normalized flux')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):
rc = {'xtick.direction': 'in', 'ytick.direction': 'in',
'xtick.major.size': 5, 'ytick.major.size': 5, 'xtick.minor.size': 2,
'ytick.minor.size': 2}
with sb.axes_style('white', rc):
fig, axs = pl.subplots(3, 1, figsize=(6, 6), sharex=True, sharey=False)
axs.flat[0].plot(t, f, 'ko', ms=5, alpha=0.6)
axs.flat[0].plot(t, mod_full, 'r-', lw=1.5, label='Model')
axs.flat[1].plot(t, f_cor, 'ko', ms=5, alpha=0.6)
axs.flat[1].plot(t, mod_ma, 'r-', lw=3, label='Transit')
axs.flat[2].plot(t, resid, 'ko', ms=5, alpha=0.6)
axs.flat[0].yaxis.get_major_formatter().set_useOffset(False)
axs.flat[1].yaxis.get_major_formatter().set_useOffset(False)
axs.flat[2].xaxis.get_major_formatter().set_useOffset(False)
axs.flat[0].minorticks_on()
axs.flat[1].minorticks_on()
axs.flat[2].minorticks_on()
pl.setp(axs.flat[2].xaxis.get_majorticklabels(), rotation=20)
pl.setp(axs.flat[0], title='Raw data', ylabel='Normalized flux')
pl.setp(axs.flat[1], title='Corrected', ylabel='Normalized flux')
pl.setp(axs.flat[2], title='Residuals')
pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='Time [BJD]')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def errorbar(t, f, s, fp=None, **kwargs):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10, 3))
ax.errorbar(t, f, s, marker='o', color='b', linestyle='none', **kwargs)
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=
'Normalized flux')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def pixels(pix, fp=None):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(5, 5))
ax.imshow(pix, interpolation='none')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def centroids(t, x, y, fp=None):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10, 3))
ax.plot(t, x, label='x', color='b')
ax.plot(t, y, label='y', color='r')
ax.legend()
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=
'Centroid')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def simple_ts(t, f, fp=None, **kwargs):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10, 3))
ax.plot(t, f, 'bo', **kwargs)
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=
'Normalized flux')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):
rc = {'xtick.direction': 'in', 'ytick.direction': 'in',
'xtick.major.size': 5, 'ytick.major.size': 5, 'xtick.minor.size': 2,
'ytick.minor.size': 2}
with sb.axes_style('white', rc):
fig, axs = pl.subplots(3, 1, figsize=(6, 6), sharex=True, sharey=False)
axs.flat[0].plot(t, f, 'ko', ms=5, alpha=0.6)
axs.flat[0].plot(t, mod_full, 'r-', lw=1.5, label='Model')
axs.flat[1].plot(t, f_cor, 'ko', ms=5, alpha=0.6)
axs.flat[1].plot(t, mod_ma, 'r-', lw=3, label='Transit')
axs.flat[2].plot(t, resid, 'ko', ms=5, alpha=0.6)
axs.flat[0].yaxis.get_major_formatter().set_useOffset(False)
axs.flat[1].yaxis.get_major_formatter().set_useOffset(False)
axs.flat[2].xaxis.get_major_formatter().set_useOffset(False)
axs.flat[0].minorticks_on()
axs.flat[1].minorticks_on()
axs.flat[2].minorticks_on()
pl.setp(axs.flat[2].xaxis.get_majorticklabels(), rotation=20)
pl.setp(axs.flat[0], title='Raw data', ylabel='Normalized flux')
pl.setp(axs.flat[1], title='Corrected', ylabel='Normalized flux')
pl.setp(axs.flat[2], title='Residuals')
pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='Time [BJD]')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sb.set_color_codes('muted')
<|reserved_special_token_0|>
def errorbar(t, f, s, fp=None, **kwargs):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10, 3))
ax.errorbar(t, f, s, marker='o', color='b', linestyle='none', **kwargs)
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=
'Normalized flux')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def pixels(pix, fp=None):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(5, 5))
ax.imshow(pix, interpolation='none')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def centroids(t, x, y, fp=None):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10, 3))
ax.plot(t, x, label='x', color='b')
ax.plot(t, y, label='y', color='r')
ax.legend()
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=
'Centroid')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def simple_ts(t, f, fp=None, **kwargs):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10, 3))
ax.plot(t, f, 'bo', **kwargs)
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=
'Normalized flux')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):
rc = {'xtick.direction': 'in', 'ytick.direction': 'in',
'xtick.major.size': 5, 'ytick.major.size': 5, 'xtick.minor.size': 2,
'ytick.minor.size': 2}
with sb.axes_style('white', rc):
fig, axs = pl.subplots(3, 1, figsize=(6, 6), sharex=True, sharey=False)
axs.flat[0].plot(t, f, 'ko', ms=5, alpha=0.6)
axs.flat[0].plot(t, mod_full, 'r-', lw=1.5, label='Model')
axs.flat[1].plot(t, f_cor, 'ko', ms=5, alpha=0.6)
axs.flat[1].plot(t, mod_ma, 'r-', lw=3, label='Transit')
axs.flat[2].plot(t, resid, 'ko', ms=5, alpha=0.6)
axs.flat[0].yaxis.get_major_formatter().set_useOffset(False)
axs.flat[1].yaxis.get_major_formatter().set_useOffset(False)
axs.flat[2].xaxis.get_major_formatter().set_useOffset(False)
axs.flat[0].minorticks_on()
axs.flat[1].minorticks_on()
axs.flat[2].minorticks_on()
pl.setp(axs.flat[2].xaxis.get_majorticklabels(), rotation=20)
pl.setp(axs.flat[0], title='Raw data', ylabel='Normalized flux')
pl.setp(axs.flat[1], title='Corrected', ylabel='Normalized flux')
pl.setp(axs.flat[2], title='Residuals')
pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='Time [BJD]')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
<|reserved_special_token_1|>
from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as pl
import seaborn as sb
sb.set_color_codes('muted')
import scipy.optimize as op
from scipy import stats
def errorbar(t, f, s, fp=None, **kwargs):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10, 3))
ax.errorbar(t, f, s, marker='o', color='b', linestyle='none', **kwargs)
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=
'Normalized flux')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def pixels(pix, fp=None):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(5, 5))
ax.imshow(pix, interpolation='none')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def centroids(t, x, y, fp=None):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10, 3))
ax.plot(t, x, label='x', color='b')
ax.plot(t, y, label='y', color='r')
ax.legend()
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=
'Centroid')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def simple_ts(t, f, fp=None, **kwargs):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10, 3))
ax.plot(t, f, 'bo', **kwargs)
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=
'Normalized flux')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):
rc = {'xtick.direction': 'in', 'ytick.direction': 'in',
'xtick.major.size': 5, 'ytick.major.size': 5, 'xtick.minor.size': 2,
'ytick.minor.size': 2}
with sb.axes_style('white', rc):
fig, axs = pl.subplots(3, 1, figsize=(6, 6), sharex=True, sharey=False)
axs.flat[0].plot(t, f, 'ko', ms=5, alpha=0.6)
axs.flat[0].plot(t, mod_full, 'r-', lw=1.5, label='Model')
axs.flat[1].plot(t, f_cor, 'ko', ms=5, alpha=0.6)
axs.flat[1].plot(t, mod_ma, 'r-', lw=3, label='Transit')
axs.flat[2].plot(t, resid, 'ko', ms=5, alpha=0.6)
axs.flat[0].yaxis.get_major_formatter().set_useOffset(False)
axs.flat[1].yaxis.get_major_formatter().set_useOffset(False)
axs.flat[2].xaxis.get_major_formatter().set_useOffset(False)
axs.flat[0].minorticks_on()
axs.flat[1].minorticks_on()
axs.flat[2].minorticks_on()
pl.setp(axs.flat[2].xaxis.get_majorticklabels(), rotation=20)
pl.setp(axs.flat[0], title='Raw data', ylabel='Normalized flux')
pl.setp(axs.flat[1], title='Corrected', ylabel='Normalized flux')
pl.setp(axs.flat[2], title='Residuals')
pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='Time [BJD]')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
<|reserved_special_token_1|>
from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as pl
import seaborn as sb
sb.set_color_codes('muted')
import scipy.optimize as op
from scipy import stats
def errorbar(t, f, s, fp=None, **kwargs):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10,3))
ax.errorbar(t, f, s, marker='o', color='b', linestyle='none', **kwargs)
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]',
ylabel='Normalized flux')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def pixels(pix, fp=None):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(5,5))
ax.imshow(pix, interpolation='none')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def centroids(t, x, y, fp=None):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10,3))
ax.plot(t, x, label='x', color='b')
ax.plot(t, y, label='y', color='r')
ax.legend()
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]',
ylabel='Centroid')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
def simple_ts(t, f, fp=None, **kwargs):
with sb.axes_style('white'):
fig, ax = pl.subplots(1, 1, figsize=(10,3))
ax.plot(t, f, 'bo', **kwargs)
pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]',
ylabel='Normalized flux')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
# def corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):
# with sb.axes_style('white'):
# fig, axs = pl.subplots(1, 3, figsize=(10,3), sharex=True, sharey=False)
# axs.flat[0].plot(t, f, 'k.')
# axs.flat[0].plot(t, mod_full, '-', lw=2)
# axs.flat[1].plot(t, f_cor, 'k.')
# axs.flat[1].plot(t, mod_ma, '-', lw=5)
# axs.flat[2].plot(t, resid, 'k.')
# pl.setp(axs, xlim=[t.min(), t.max()], xticks=[], yticks=[])
# fig.tight_layout()
# if fp:
# fig.savefig(fp)
# pl.close()
def corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):
rc = {'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.major.size': 5,
'ytick.major.size': 5,
'xtick.minor.size': 2,
'ytick.minor.size': 2}
# t_offset = int(t[0])
# t_offset = 2450000
# t -= t_offset
with sb.axes_style('white', rc):
fig, axs = pl.subplots(3, 1, figsize=(6,6), sharex=True, sharey=False)
axs.flat[0].plot(t, f, 'ko', ms=5, alpha=0.6)
# axs.flat[0].plot(t, mod_full, 'r-', lw=1, label='Transit + Systematics')
axs.flat[0].plot(t, mod_full, 'r-', lw=1.5, label='Model')
# axs.flat[0].legend()
axs.flat[1].plot(t, f_cor, 'ko', ms=5, alpha=0.6)
axs.flat[1].plot(t, mod_ma, 'r-', lw=3, label='Transit')
# axs.flat[1].legend()
axs.flat[2].plot(t, resid, 'ko', ms=5, alpha=0.6)
axs.flat[0].yaxis.get_major_formatter().set_useOffset(False)
axs.flat[1].yaxis.get_major_formatter().set_useOffset(False)
axs.flat[2].xaxis.get_major_formatter().set_useOffset(False)
axs.flat[0].minorticks_on()
axs.flat[1].minorticks_on()
axs.flat[2].minorticks_on()
pl.setp(axs.flat[2].xaxis.get_majorticklabels(), rotation=20)
pl.setp(axs.flat[0], title='Raw data', ylabel='Normalized flux')
pl.setp(axs.flat[1], title='Corrected', ylabel='Normalized flux')
# pl.setp(axs.flat[2], title='Precision: {0:.0f} ppm'.format(resid.std()*1e6), ylabel='Residuals')
pl.setp(axs.flat[2], title='Residuals')
# pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='T-{} [BJD]'.format(t_offset))
pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='Time [BJD]')
fig.tight_layout()
if fp:
fig.savefig(fp)
pl.close()
|
flexible
|
{
"blob_id": "1e929bc3c97de859a16a4ac8d5ac2ebadefd0516",
"index": 6624,
"step-1": "<mask token>\n\n\ndef errorbar(t, f, s, fp=None, **kwargs):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10, 3))\n ax.errorbar(t, f, s, marker='o', color='b', linestyle='none', **kwargs)\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=\n 'Normalized flux')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\n<mask token>\n\n\ndef centroids(t, x, y, fp=None):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10, 3))\n ax.plot(t, x, label='x', color='b')\n ax.plot(t, y, label='y', color='r')\n ax.legend()\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=\n 'Centroid')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef simple_ts(t, f, fp=None, **kwargs):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10, 3))\n ax.plot(t, f, 'bo', **kwargs)\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=\n 'Normalized flux')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):\n rc = {'xtick.direction': 'in', 'ytick.direction': 'in',\n 'xtick.major.size': 5, 'ytick.major.size': 5, 'xtick.minor.size': 2,\n 'ytick.minor.size': 2}\n with sb.axes_style('white', rc):\n fig, axs = pl.subplots(3, 1, figsize=(6, 6), sharex=True, sharey=False)\n axs.flat[0].plot(t, f, 'ko', ms=5, alpha=0.6)\n axs.flat[0].plot(t, mod_full, 'r-', lw=1.5, label='Model')\n axs.flat[1].plot(t, f_cor, 'ko', ms=5, alpha=0.6)\n axs.flat[1].plot(t, mod_ma, 'r-', lw=3, label='Transit')\n axs.flat[2].plot(t, resid, 'ko', ms=5, alpha=0.6)\n axs.flat[0].yaxis.get_major_formatter().set_useOffset(False)\n axs.flat[1].yaxis.get_major_formatter().set_useOffset(False)\n axs.flat[2].xaxis.get_major_formatter().set_useOffset(False)\n axs.flat[0].minorticks_on()\n axs.flat[1].minorticks_on()\n axs.flat[2].minorticks_on()\n pl.setp(axs.flat[2].xaxis.get_majorticklabels(), rotation=20)\n pl.setp(axs.flat[0], title='Raw data', ylabel='Normalized flux')\n pl.setp(axs.flat[1], title='Corrected', ylabel='Normalized flux')\n pl.setp(axs.flat[2], title='Residuals')\n pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='Time [BJD]')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n",
"step-2": "<mask token>\n\n\ndef errorbar(t, f, s, fp=None, **kwargs):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10, 3))\n ax.errorbar(t, f, s, marker='o', color='b', linestyle='none', **kwargs)\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=\n 'Normalized flux')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef pixels(pix, fp=None):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(5, 5))\n ax.imshow(pix, interpolation='none')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef centroids(t, x, y, fp=None):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10, 3))\n ax.plot(t, x, label='x', color='b')\n ax.plot(t, y, label='y', color='r')\n ax.legend()\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=\n 'Centroid')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef simple_ts(t, f, fp=None, **kwargs):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10, 3))\n ax.plot(t, f, 'bo', **kwargs)\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=\n 'Normalized flux')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):\n rc = {'xtick.direction': 'in', 'ytick.direction': 'in',\n 'xtick.major.size': 5, 'ytick.major.size': 5, 'xtick.minor.size': 2,\n 'ytick.minor.size': 2}\n with sb.axes_style('white', rc):\n fig, axs = pl.subplots(3, 1, figsize=(6, 6), sharex=True, sharey=False)\n axs.flat[0].plot(t, f, 'ko', ms=5, alpha=0.6)\n axs.flat[0].plot(t, mod_full, 'r-', lw=1.5, label='Model')\n axs.flat[1].plot(t, f_cor, 'ko', ms=5, alpha=0.6)\n axs.flat[1].plot(t, mod_ma, 'r-', lw=3, label='Transit')\n axs.flat[2].plot(t, resid, 'ko', ms=5, alpha=0.6)\n axs.flat[0].yaxis.get_major_formatter().set_useOffset(False)\n axs.flat[1].yaxis.get_major_formatter().set_useOffset(False)\n axs.flat[2].xaxis.get_major_formatter().set_useOffset(False)\n axs.flat[0].minorticks_on()\n axs.flat[1].minorticks_on()\n axs.flat[2].minorticks_on()\n pl.setp(axs.flat[2].xaxis.get_majorticklabels(), rotation=20)\n pl.setp(axs.flat[0], title='Raw data', ylabel='Normalized flux')\n pl.setp(axs.flat[1], title='Corrected', ylabel='Normalized flux')\n pl.setp(axs.flat[2], title='Residuals')\n pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='Time [BJD]')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n",
"step-3": "<mask token>\nsb.set_color_codes('muted')\n<mask token>\n\n\ndef errorbar(t, f, s, fp=None, **kwargs):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10, 3))\n ax.errorbar(t, f, s, marker='o', color='b', linestyle='none', **kwargs)\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=\n 'Normalized flux')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef pixels(pix, fp=None):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(5, 5))\n ax.imshow(pix, interpolation='none')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef centroids(t, x, y, fp=None):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10, 3))\n ax.plot(t, x, label='x', color='b')\n ax.plot(t, y, label='y', color='r')\n ax.legend()\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=\n 'Centroid')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef simple_ts(t, f, fp=None, **kwargs):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10, 3))\n ax.plot(t, f, 'bo', **kwargs)\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=\n 'Normalized flux')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):\n rc = {'xtick.direction': 'in', 'ytick.direction': 'in',\n 'xtick.major.size': 5, 'ytick.major.size': 5, 'xtick.minor.size': 2,\n 'ytick.minor.size': 2}\n with sb.axes_style('white', rc):\n fig, axs = pl.subplots(3, 1, figsize=(6, 6), sharex=True, sharey=False)\n axs.flat[0].plot(t, f, 'ko', ms=5, alpha=0.6)\n axs.flat[0].plot(t, mod_full, 'r-', lw=1.5, label='Model')\n axs.flat[1].plot(t, f_cor, 'ko', ms=5, alpha=0.6)\n axs.flat[1].plot(t, mod_ma, 'r-', lw=3, label='Transit')\n axs.flat[2].plot(t, resid, 'ko', ms=5, alpha=0.6)\n axs.flat[0].yaxis.get_major_formatter().set_useOffset(False)\n axs.flat[1].yaxis.get_major_formatter().set_useOffset(False)\n axs.flat[2].xaxis.get_major_formatter().set_useOffset(False)\n axs.flat[0].minorticks_on()\n axs.flat[1].minorticks_on()\n axs.flat[2].minorticks_on()\n pl.setp(axs.flat[2].xaxis.get_majorticklabels(), rotation=20)\n pl.setp(axs.flat[0], title='Raw data', ylabel='Normalized flux')\n pl.setp(axs.flat[1], title='Corrected', ylabel='Normalized flux')\n pl.setp(axs.flat[2], title='Residuals')\n pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='Time [BJD]')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n",
"step-4": "from __future__ import absolute_import\nimport numpy as np\nimport matplotlib.pyplot as pl\nimport seaborn as sb\nsb.set_color_codes('muted')\nimport scipy.optimize as op\nfrom scipy import stats\n\n\ndef errorbar(t, f, s, fp=None, **kwargs):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10, 3))\n ax.errorbar(t, f, s, marker='o', color='b', linestyle='none', **kwargs)\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=\n 'Normalized flux')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef pixels(pix, fp=None):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(5, 5))\n ax.imshow(pix, interpolation='none')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef centroids(t, x, y, fp=None):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10, 3))\n ax.plot(t, x, label='x', color='b')\n ax.plot(t, y, label='y', color='r')\n ax.legend()\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=\n 'Centroid')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef simple_ts(t, f, fp=None, **kwargs):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10, 3))\n ax.plot(t, f, 'bo', **kwargs)\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel=\n 'Normalized flux')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):\n rc = {'xtick.direction': 'in', 'ytick.direction': 'in',\n 'xtick.major.size': 5, 'ytick.major.size': 5, 'xtick.minor.size': 2,\n 'ytick.minor.size': 2}\n with sb.axes_style('white', rc):\n fig, axs = pl.subplots(3, 1, figsize=(6, 6), sharex=True, sharey=False)\n axs.flat[0].plot(t, f, 'ko', ms=5, alpha=0.6)\n axs.flat[0].plot(t, mod_full, 'r-', lw=1.5, label='Model')\n axs.flat[1].plot(t, f_cor, 'ko', ms=5, alpha=0.6)\n axs.flat[1].plot(t, mod_ma, 'r-', lw=3, label='Transit')\n axs.flat[2].plot(t, resid, 'ko', ms=5, alpha=0.6)\n axs.flat[0].yaxis.get_major_formatter().set_useOffset(False)\n axs.flat[1].yaxis.get_major_formatter().set_useOffset(False)\n axs.flat[2].xaxis.get_major_formatter().set_useOffset(False)\n axs.flat[0].minorticks_on()\n axs.flat[1].minorticks_on()\n axs.flat[2].minorticks_on()\n pl.setp(axs.flat[2].xaxis.get_majorticklabels(), rotation=20)\n pl.setp(axs.flat[0], title='Raw data', ylabel='Normalized flux')\n pl.setp(axs.flat[1], title='Corrected', ylabel='Normalized flux')\n pl.setp(axs.flat[2], title='Residuals')\n pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='Time [BJD]')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n",
"step-5": "from __future__ import absolute_import\nimport numpy as np\nimport matplotlib.pyplot as pl\nimport seaborn as sb\nsb.set_color_codes('muted')\nimport scipy.optimize as op\nfrom scipy import stats\n\n\ndef errorbar(t, f, s, fp=None, **kwargs):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10,3))\n ax.errorbar(t, f, s, marker='o', color='b', linestyle='none', **kwargs)\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]',\n ylabel='Normalized flux')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef pixels(pix, fp=None):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(5,5))\n ax.imshow(pix, interpolation='none')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef centroids(t, x, y, fp=None):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10,3))\n ax.plot(t, x, label='x', color='b')\n ax.plot(t, y, label='y', color='r')\n ax.legend()\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]',\n ylabel='Centroid')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\ndef simple_ts(t, f, fp=None, **kwargs):\n with sb.axes_style('white'):\n fig, ax = pl.subplots(1, 1, figsize=(10,3))\n ax.plot(t, f, 'bo', **kwargs)\n pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]',\n ylabel='Normalized flux')\n fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n\n\n# def corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):\n# with sb.axes_style('white'):\n# fig, axs = pl.subplots(1, 3, figsize=(10,3), sharex=True, sharey=False)\n# axs.flat[0].plot(t, f, 'k.')\n# axs.flat[0].plot(t, mod_full, '-', lw=2)\n# axs.flat[1].plot(t, f_cor, 'k.')\n# axs.flat[1].plot(t, mod_ma, '-', lw=5)\n# axs.flat[2].plot(t, resid, 'k.')\n# pl.setp(axs, xlim=[t.min(), t.max()], xticks=[], yticks=[])\n# fig.tight_layout()\n# if fp:\n# fig.savefig(fp)\n# pl.close()\n\ndef corrected_ts(t, f, f_cor, mod_full, mod_ma, resid, fp=None):\n\n rc = {'xtick.direction': 'in',\n 'ytick.direction': 'in',\n 'xtick.major.size': 5,\n 'ytick.major.size': 5,\n 'xtick.minor.size': 2,\n 'ytick.minor.size': 2}\n\n # t_offset = int(t[0])\n # t_offset = 2450000\n # t -= t_offset\n\n with sb.axes_style('white', rc):\n fig, axs = pl.subplots(3, 1, figsize=(6,6), sharex=True, sharey=False)\n axs.flat[0].plot(t, f, 'ko', ms=5, alpha=0.6)\n # axs.flat[0].plot(t, mod_full, 'r-', lw=1, label='Transit + Systematics')\n axs.flat[0].plot(t, mod_full, 'r-', lw=1.5, label='Model')\n # axs.flat[0].legend()\n axs.flat[1].plot(t, f_cor, 'ko', ms=5, alpha=0.6)\n axs.flat[1].plot(t, mod_ma, 'r-', lw=3, label='Transit')\n # axs.flat[1].legend()\n axs.flat[2].plot(t, resid, 'ko', ms=5, alpha=0.6)\n axs.flat[0].yaxis.get_major_formatter().set_useOffset(False)\n axs.flat[1].yaxis.get_major_formatter().set_useOffset(False)\n axs.flat[2].xaxis.get_major_formatter().set_useOffset(False)\n axs.flat[0].minorticks_on()\n axs.flat[1].minorticks_on()\n axs.flat[2].minorticks_on()\n pl.setp(axs.flat[2].xaxis.get_majorticklabels(), rotation=20)\n pl.setp(axs.flat[0], title='Raw data', ylabel='Normalized flux')\n pl.setp(axs.flat[1], title='Corrected', ylabel='Normalized flux')\n # pl.setp(axs.flat[2], title='Precision: {0:.0f} ppm'.format(resid.std()*1e6), ylabel='Residuals')\n pl.setp(axs.flat[2], title='Residuals')\n # pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='T-{} [BJD]'.format(t_offset))\n pl.setp(axs.flat[2], xlim=[t.min(), t.max()], xlabel='Time [BJD]')\n 
fig.tight_layout()\n if fp:\n fig.savefig(fp)\n pl.close()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
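As a usage note for the plotting helpers in the record above: each function builds a standalone figure and writes it to disk when `fp` is given. A minimal sketch that exercises `errorbar` with synthetic inputs (the function body is copied from the record; the array values and output filename are invented):

import numpy as np
import matplotlib.pyplot as pl
import seaborn as sb

def errorbar(t, f, s, fp=None, **kwargs):
    # Copied from the record above: scatter with error bars in a white seaborn style.
    with sb.axes_style('white'):
        fig, ax = pl.subplots(1, 1, figsize=(10, 3))
        ax.errorbar(t, f, s, marker='o', color='b', linestyle='none', **kwargs)
        pl.setp(ax, xlim=[t.min(), t.max()], xlabel='Time [BJD]', ylabel='Normalized flux')
        fig.tight_layout()
        if fp:
            fig.savefig(fp)
            pl.close()

t = np.linspace(0.0, 1.0, 50)              # synthetic time stamps
f = 1.0 + 0.001 * np.random.randn(50)      # synthetic normalized flux
s = np.full(50, 0.001)                     # synthetic per-point uncertainties
errorbar(t, f, s, fp='errorbar_demo.png')  # invented output filename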
<|reserved_special_token_0|>
class ExcelOperation:
def __init__(self, filename=None):
self.xlApp = win32com.client.Dispatch('Excel.Application')
if filename:
self.filename = filename
self.xlBook = self.xlApp.Workbooks.Open(filename)
else:
self.xlBook = self.xlApp.Workbooks.Add()
self.filename = ''
def save(self, newfilename=None):
if newfilename:
self.filename = newfilename
self.xlBook.SaveAs(newfilename)
else:
self.xlBook.Save()
def close(self):
self.xlBook.Close(SaveChanges=0)
del self.xlApp
<|reserved_special_token_0|>
def setCell(self, sheet, row, col, value):
"""set value of one cell"""
sht = self.xlBook.Worksheets(sheet)
sht.Cells(row, col).Value = value
def setCellformat(self, sheet, row, col):
"""set value of one cell"""
sht = self.xlBook.Worksheets(sheet)
sht.Cells(row, col).Font.Size = 15
sht.Cells(row, col).Font.Bold = True
sht.Cells(row, col).Font.Name = 'Arial'
sht.Cells(row, col).Interior.ColorIndex = 3
sht.Cells(row, col).BorderAround(1, 4)
sht.Rows(3).RowHeight = 30
sht.Cells(row, col).HorizontalAlignment = -4131
sht.Cells(row, col).VerticalAlignment = -4160
<|reserved_special_token_0|>
def deleteRow(self, sheet, row):
sht = self.xlBook.Worksheets(sheet)
sht.Rows(row).Delete()
sht.Columns(row).Delete()
<|reserved_special_token_0|>
def addPicture(self, sheet, pictureName, Left, Top, Width, Height):
"""Insert a picture in sheet"""
sht = self.xlBook.Worksheets(sheet)
sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)
def cpSheet(self, before):
"""copy sheet"""
shts = self.xlBook.Worksheets
shts(1).Copy(None, shts(1))
def judgeRowHeight(self, OperStr):
print('正在完成要求', OperStr)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def judgeSort(self, OperStr):
print('正在完成要求', OperStr)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def judgeOperFromStr(self, OperStr):
if OperStr.find('行高') != -1:
print('进入行高操作')
self.judgeRowHeight(OperStr)
print('结束行高操作')
if OperStr.find('列宽') != -1:
print('进入列宽操作')
self.judgeColWidth(OperStr)
print('结束列宽操作')
if OperStr.find('公式') != -1:
print('进入公式操作')
self.judgeFormula(OperStr)
print('结束公式操作')
if OperStr.find('函数') != -1:
print('进入函数操作')
self.judgeFunction(OperStr)
print('结束函数操作')
if OperStr.find('所有框线') != -1:
print('进入所有框线操作')
self.judgeBoxLine(OperStr)
print('结束所有框线操作')
if OperStr.find('排序') != -1:
print('进入排序操作')
self.judgeSort(OperStr)
print('结束排序操作')
if OperStr.find('图表') != -1:
print('进入图表操作')
self.judgeChart(OperStr)
print('结束图表操作')
pass
<|reserved_special_token_0|>
class PptOperation:
def __init__(self):
pass
def AnimationOper(self):
pass
def SwitchOper(self):
pass
def InsertOper(self, style):
pass
def BackgroundOper(self):
pass
def HyperlinkOper(self):
pass
def judgeAnimation(self, OperStr):
print('正在完成要求', OperStr)
pattern = re.compile('“(.*)”')
print(pattern.findall(OperStr))
strFile = str(pattern.findall(OperStr))
file1 = strFile.split('”')
source = file1[0][2:]
print(source)
file2 = strFile.split('“')
dest = file2[1][0:-2]
print(dest)
def judgeSwitch(self, OperStr):
print('正在完成要求', OperStr)
pattern = re.compile('“(.*)”')
print(pattern.findall(OperStr))
strFile = str(pattern.findall(OperStr))
file1 = strFile.split('”')
source = file1[0][2:]
print(source)
file2 = strFile.split('“')
dest = file2[1][0:-2]
print(dest)
def judgeInsert(self, OperStr):
print('正在完成要求', OperStr)
def judgeBackground(self, OperStr):
print('正在完成要求', OperStr)
def judgeHyperlink(self, OperStr):
print('正在完成要求', OperStr)
def judgeOperFromStr(self, OperStr):
if OperStr.find('动画') != -1:
print('进入动画操作')
self.judgeAnimation(OperStr)
print('结束动画操作')
if OperStr.find('切换') != -1:
print('进入切换操作')
self.judgeSwitch(OperStr)
print('结束切换操作')
if OperStr.find('超级链接') != -1:
print('进入超级链接操作')
self.judgeHyperlink(OperStr)
print('结束超级链接操作')
if OperStr.find('背景') != -1:
print('进入背景操作')
self.judgeBackground(OperStr)
print('结束背景操作')
if OperStr.find('插入') != -1:
print('进入插入操作')
self.judgeInsert(OperStr)
print('结束插入操作')
<|reserved_special_token_0|>
class OperationTypeJudge:
def __init__(self):
pass
def getType(self, OperStr):
if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:
print('这是word题要求')
print('已转word题处理')
elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:
print('这是excel题要求')
print('已转excel题处理')
elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:
print('这是ppt题要求')
print('已转ppt题处理')
pass
def getOperaPath(self):
pass
def getOperaFileName(self):
pass
<|reserved_special_token_0|>
class SelectOperation:
def __init__(self):
pass
def getQusetionTxt(self, item):
pass
def getQusetionPic(self, item):
pass
def getAnswer(self, item):
pass
def getCorrectAnswer(self, item):
pass
<|reserved_special_token_0|>
class JudgeOperation:
def __init__(self):
pass
def getQusetionTxt(self, item):
pass
def getQusetionPic(self, item):
pass
def getAnswer(self, item):
pass
def getCorrectAnswer(self, item):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WordOperation:
def __init__(self, filename=None):
self.wordApp = win32com.client.Dispatch('Word.Application')
if filename:
self.filename = filename
else:
self.filename = ''
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ExcelOperation:
def __init__(self, filename=None):
self.xlApp = win32com.client.Dispatch('Excel.Application')
if filename:
self.filename = filename
self.xlBook = self.xlApp.Workbooks.Open(filename)
else:
self.xlBook = self.xlApp.Workbooks.Add()
self.filename = ''
def save(self, newfilename=None):
if newfilename:
self.filename = newfilename
self.xlBook.SaveAs(newfilename)
else:
self.xlBook.Save()
def close(self):
self.xlBook.Close(SaveChanges=0)
del self.xlApp
def getCell(self, sheet, row, col):
"""Get value of one cell"""
sht = self.xlBook.Worksheets(sheet)
return sht.Cells(row, col).Value
def setCell(self, sheet, row, col, value):
"""set value of one cell"""
sht = self.xlBook.Worksheets(sheet)
sht.Cells(row, col).Value = value
def setCellformat(self, sheet, row, col):
"""set value of one cell"""
sht = self.xlBook.Worksheets(sheet)
sht.Cells(row, col).Font.Size = 15
sht.Cells(row, col).Font.Bold = True
sht.Cells(row, col).Font.Name = 'Arial'
sht.Cells(row, col).Interior.ColorIndex = 3
sht.Cells(row, col).BorderAround(1, 4)
sht.Rows(3).RowHeight = 30
sht.Cells(row, col).HorizontalAlignment = -4131
sht.Cells(row, col).VerticalAlignment = -4160
def rowHeightOper(self, sheet, row, height):
sht = self.xlBook.Worksheets(sheet)
sht.Rows(row).RowHeight = height
def deleteRow(self, sheet, row):
sht = self.xlBook.Worksheets(sheet)
sht.Rows(row).Delete()
sht.Columns(row).Delete()
def getRange(self, sheet, row1, col1, row2, col2):
"""return a 2d array (i.e. tuple of tuples)"""
sht = self.xlBook.Worksheets(sheet)
return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value
def addPicture(self, sheet, pictureName, Left, Top, Width, Height):
"""Insert a picture in sheet"""
sht = self.xlBook.Worksheets(sheet)
sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)
def cpSheet(self, before):
"""copy sheet"""
shts = self.xlBook.Worksheets
shts(1).Copy(None, shts(1))
def judgeRowHeight(self, OperStr):
print('正在完成要求', OperStr)
def judgeColWidth(self, OperStr):
print('正在完成要求', OperStr)
def judgeFormula(self, OperStr):
print('正在完成要求', OperStr)
def judgeFunction(self, OperStr):
print('正在完成要求', OperStr)
def judgeSort(self, OperStr):
print('正在完成要求', OperStr)
def judgeChart(self, OperStr):
print('正在完成要求', OperStr)
def judgeBoxLine(self, OperStr):
print('正在完成要求', OperStr)
def judgeOperFromStr(self, OperStr):
if OperStr.find('行高') != -1:
print('进入行高操作')
self.judgeRowHeight(OperStr)
print('结束行高操作')
if OperStr.find('列宽') != -1:
print('进入列宽操作')
self.judgeColWidth(OperStr)
print('结束列宽操作')
if OperStr.find('公式') != -1:
print('进入公式操作')
self.judgeFormula(OperStr)
print('结束公式操作')
if OperStr.find('函数') != -1:
print('进入函数操作')
self.judgeFunction(OperStr)
print('结束函数操作')
if OperStr.find('所有框线') != -1:
print('进入所有框线操作')
self.judgeBoxLine(OperStr)
print('结束所有框线操作')
if OperStr.find('排序') != -1:
print('进入排序操作')
self.judgeSort(OperStr)
print('结束排序操作')
if OperStr.find('图表') != -1:
print('进入图表操作')
self.judgeChart(OperStr)
print('结束图表操作')
pass
<|reserved_special_token_0|>
class PptOperation:
def __init__(self):
pass
def AnimationOper(self):
pass
def SwitchOper(self):
pass
def InsertOper(self, style):
pass
def BackgroundOper(self):
pass
def HyperlinkOper(self):
pass
def judgeAnimation(self, OperStr):
print('正在完成要求', OperStr)
pattern = re.compile('“(.*)”')
print(pattern.findall(OperStr))
strFile = str(pattern.findall(OperStr))
file1 = strFile.split('”')
source = file1[0][2:]
print(source)
file2 = strFile.split('“')
dest = file2[1][0:-2]
print(dest)
def judgeSwitch(self, OperStr):
print('正在完成要求', OperStr)
pattern = re.compile('“(.*)”')
print(pattern.findall(OperStr))
strFile = str(pattern.findall(OperStr))
file1 = strFile.split('”')
source = file1[0][2:]
print(source)
file2 = strFile.split('“')
dest = file2[1][0:-2]
print(dest)
def judgeInsert(self, OperStr):
print('正在完成要求', OperStr)
def judgeBackground(self, OperStr):
print('正在完成要求', OperStr)
def judgeHyperlink(self, OperStr):
print('正在完成要求', OperStr)
def judgeOperFromStr(self, OperStr):
if OperStr.find('动画') != -1:
print('进入动画操作')
self.judgeAnimation(OperStr)
print('结束动画操作')
if OperStr.find('切换') != -1:
print('进入切换操作')
self.judgeSwitch(OperStr)
print('结束切换操作')
if OperStr.find('超级链接') != -1:
print('进入超级链接操作')
self.judgeHyperlink(OperStr)
print('结束超级链接操作')
if OperStr.find('背景') != -1:
print('进入背景操作')
self.judgeBackground(OperStr)
print('结束背景操作')
if OperStr.find('插入') != -1:
print('进入插入操作')
self.judgeInsert(OperStr)
print('结束插入操作')
<|reserved_special_token_0|>
class OperationTypeJudge:
def __init__(self):
pass
def getType(self, OperStr):
if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:
print('这是word题要求')
print('已转word题处理')
elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:
print('这是excel题要求')
print('已转excel题处理')
elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:
print('这是ppt题要求')
print('已转ppt题处理')
pass
def getOperaPath(self):
pass
def getOperaFileName(self):
pass
<|reserved_special_token_0|>
class SelectOperation:
def __init__(self):
pass
def getQusetionTxt(self, item):
pass
def getQusetionPic(self, item):
pass
def getAnswer(self, item):
pass
def getCorrectAnswer(self, item):
pass
<|reserved_special_token_0|>
class JudgeOperation:
def __init__(self):
pass
def getQusetionTxt(self, item):
pass
def getQusetionPic(self, item):
pass
def getAnswer(self, item):
pass
def getCorrectAnswer(self, item):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WordOperation:
def __init__(self, filename=None):
self.wordApp = win32com.client.Dispatch('Word.Application')
if filename:
self.filename = filename
else:
self.filename = ''
def save(self, newfilename=None):
if newfilename:
self.filename = newfilename
else:
pass
def close(self):
del self.wordApp
def fontOper(self):
pass
def replaceOper(self, source, dest):
pass
def insertOper(self, style):
pass
def pageOper(self):
pass
def paragraphOper(self):
pass
def judgePage(self, OperStr):
print('正在完成要求', OperStr)
def judgeFont(self, OperStr):
print('正在完成要求', OperStr)
def judgeReplace(self, OperStr):
print('正在完成要求', OperStr)
def judgeInsert(self, OperStr):
print('正在完成要求', OperStr)
def judgeParagraph(self, OperStr):
print('正在完成要求', OperStr)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ExcelOperation:
def __init__(self, filename=None):
self.xlApp = win32com.client.Dispatch('Excel.Application')
if filename:
self.filename = filename
self.xlBook = self.xlApp.Workbooks.Open(filename)
else:
self.xlBook = self.xlApp.Workbooks.Add()
self.filename = ''
def save(self, newfilename=None):
if newfilename:
self.filename = newfilename
self.xlBook.SaveAs(newfilename)
else:
self.xlBook.Save()
def close(self):
self.xlBook.Close(SaveChanges=0)
del self.xlApp
def getCell(self, sheet, row, col):
"""Get value of one cell"""
sht = self.xlBook.Worksheets(sheet)
return sht.Cells(row, col).Value
def setCell(self, sheet, row, col, value):
"""set value of one cell"""
sht = self.xlBook.Worksheets(sheet)
sht.Cells(row, col).Value = value
def setCellformat(self, sheet, row, col):
"""set value of one cell"""
sht = self.xlBook.Worksheets(sheet)
sht.Cells(row, col).Font.Size = 15
sht.Cells(row, col).Font.Bold = True
sht.Cells(row, col).Font.Name = 'Arial'
sht.Cells(row, col).Interior.ColorIndex = 3
sht.Cells(row, col).BorderAround(1, 4)
sht.Rows(3).RowHeight = 30
sht.Cells(row, col).HorizontalAlignment = -4131
sht.Cells(row, col).VerticalAlignment = -4160
def rowHeightOper(self, sheet, row, height):
sht = self.xlBook.Worksheets(sheet)
sht.Rows(row).RowHeight = height
def deleteRow(self, sheet, row):
sht = self.xlBook.Worksheets(sheet)
sht.Rows(row).Delete()
sht.Columns(row).Delete()
def getRange(self, sheet, row1, col1, row2, col2):
"""return a 2d array (i.e. tuple of tuples)"""
sht = self.xlBook.Worksheets(sheet)
return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value
def addPicture(self, sheet, pictureName, Left, Top, Width, Height):
"""Insert a picture in sheet"""
sht = self.xlBook.Worksheets(sheet)
sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)
def cpSheet(self, before):
"""copy sheet"""
shts = self.xlBook.Worksheets
shts(1).Copy(None, shts(1))
def judgeRowHeight(self, OperStr):
print('正在完成要求', OperStr)
def judgeColWidth(self, OperStr):
print('正在完成要求', OperStr)
def judgeFormula(self, OperStr):
print('正在完成要求', OperStr)
def judgeFunction(self, OperStr):
print('正在完成要求', OperStr)
def judgeSort(self, OperStr):
print('正在完成要求', OperStr)
def judgeChart(self, OperStr):
print('正在完成要求', OperStr)
def judgeBoxLine(self, OperStr):
print('正在完成要求', OperStr)
def judgeOperFromStr(self, OperStr):
if OperStr.find('行高') != -1:
print('进入行高操作')
self.judgeRowHeight(OperStr)
print('结束行高操作')
if OperStr.find('列宽') != -1:
print('进入列宽操作')
self.judgeColWidth(OperStr)
print('结束列宽操作')
if OperStr.find('公式') != -1:
print('进入公式操作')
self.judgeFormula(OperStr)
print('结束公式操作')
if OperStr.find('函数') != -1:
print('进入函数操作')
self.judgeFunction(OperStr)
print('结束函数操作')
if OperStr.find('所有框线') != -1:
print('进入所有框线操作')
self.judgeBoxLine(OperStr)
print('结束所有框线操作')
if OperStr.find('排序') != -1:
print('进入排序操作')
self.judgeSort(OperStr)
print('结束排序操作')
if OperStr.find('图表') != -1:
print('进入图表操作')
self.judgeChart(OperStr)
print('结束图表操作')
pass
<|reserved_special_token_0|>
class PptOperation:
def __init__(self):
pass
def AnimationOper(self):
pass
def SwitchOper(self):
pass
def InsertOper(self, style):
pass
def BackgroundOper(self):
pass
def HyperlinkOper(self):
pass
def judgeAnimation(self, OperStr):
print('正在完成要求', OperStr)
pattern = re.compile('“(.*)”')
print(pattern.findall(OperStr))
strFile = str(pattern.findall(OperStr))
file1 = strFile.split('”')
source = file1[0][2:]
print(source)
file2 = strFile.split('“')
dest = file2[1][0:-2]
print(dest)
def judgeSwitch(self, OperStr):
print('正在完成要求', OperStr)
pattern = re.compile('“(.*)”')
print(pattern.findall(OperStr))
strFile = str(pattern.findall(OperStr))
file1 = strFile.split('”')
source = file1[0][2:]
print(source)
file2 = strFile.split('“')
dest = file2[1][0:-2]
print(dest)
def judgeInsert(self, OperStr):
print('正在完成要求', OperStr)
def judgeBackground(self, OperStr):
print('正在完成要求', OperStr)
def judgeHyperlink(self, OperStr):
print('正在完成要求', OperStr)
def judgeOperFromStr(self, OperStr):
if OperStr.find('动画') != -1:
print('进入动画操作')
self.judgeAnimation(OperStr)
print('结束动画操作')
if OperStr.find('切换') != -1:
print('进入切换操作')
self.judgeSwitch(OperStr)
print('结束切换操作')
if OperStr.find('超级链接') != -1:
print('进入超级链接操作')
self.judgeHyperlink(OperStr)
print('结束超级链接操作')
if OperStr.find('背景') != -1:
print('进入背景操作')
self.judgeBackground(OperStr)
print('结束背景操作')
if OperStr.find('插入') != -1:
print('进入插入操作')
self.judgeInsert(OperStr)
print('结束插入操作')
class OperationTypeJudge:
def __init__(self):
pass
def getType(self, OperStr):
if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:
print('这是word题要求')
print('已转word题处理')
elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:
print('这是excel题要求')
print('已转excel题处理')
elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:
print('这是ppt题要求')
print('已转ppt题处理')
pass
def getOperaPath(self):
pass
def getOperaFileName(self):
pass
class SelectOperation:
def __init__(self):
pass
def getQusetionTxt(self, item):
pass
def getQusetionPic(self, item):
pass
def getAnswer(self, item):
pass
def getCorrectAnswer(self, item):
pass
class JudgeOperation:
def __init__(self):
pass
def getQusetionTxt(self, item):
pass
def getQusetionPic(self, item):
pass
def getAnswer(self, item):
pass
def getCorrectAnswer(self, item):
pass
# -*- coding: utf-8 -*-
__author__ = 'tqs'
from win32com.client import Dispatch
import win32com.client
import time
import os
import re
import win32api
'''
Windows file-operation section:
Knowledge points covered by the exam:
1. Deleting files and folders
2. Copying files and folders
3. Moving files and folders
4. Renaming files and folders
5. File attributes
Sample exam questions (kept in Chinese because the parsers match Chinese keywords):
1、在“蕨类植物”文件夹中,新建一个子文件夹“薄囊蕨类”。
2、将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。
3、设置“螺旋藻.aaa”文件属性为“只读”。
4、在桌面上建立“绿色植物”的快捷方式。
'''
class WinOperation:
def __init__(self):
self.soucePath = ''
self.destPath = ''
self.destFilename = ''
self.sourceFilename = ''
    def dele(self,destFilename):#delete a file or folder
        print('删除文件',destFilename)
        pass
    def rename(self,sourceFilename,destFilename):#rename a file
        print(sourceFilename,'文件改名为',destFilename)
        pass
    def mov(self,sourceFilename,destFilename):#move a file
        print(sourceFilename,'移动文件为',destFilename)
        pass
    def copy(self,sourceFilename,destFilename):#copy a file
        print(sourceFilename,'复制文件为',destFilename)
        pass
    def prop(self,destFilename):#set file attributes
        print('文件属性',destFilename)
        pass
    def realSourceFilename(self,soucePath,sourceFilename):
        #Placeholder: should resolve the full path of sourceFilename under soucePath (e.g. via os.walk).
        return sourceFilename
    def realdestFilename(self,destPath,destFilename):
        #Placeholder: should resolve the full path of destFilename under destPath.
        return destFilename
    def judgeNew(self,OperStr):#parse a "create file/folder" requirement from the question text
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#first quoted name (source)
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#second quoted name (destination)
        print(dest)
        pass
    def judgeDele(self,OperStr):#parse a "delete file" requirement from the question text
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        pass
    def judgeRename(self,OperStr):#parse a "rename file" requirement from the question text
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#first quoted name (source)
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#second quoted name (destination)
        print(dest)
        pass
    def judgeMov(self,OperStr):#parse a "move file" requirement from the question text
        #Parses requirements of the form 将文件“淡水藻.ddd”移动到“藻类植物”文件夹中
        #into a source file and a destination folder.
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#first quoted name (source)
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#second quoted name (destination)
        print(dest)
        #The full paths still need to be looked up under the exam directory.
        sourceFilename=self.realSourceFilename(r"d:\zrexam\windows",source)
        destFilename=self.realdestFilename(r"d:\zrexam\windows",dest)
        self.mov(sourceFilename,destFilename)
    def judgeCopy(self,OperStr):#parse a "copy file" requirement from the question text
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#first quoted name (source)
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#second quoted name (destination)
        print(dest)
        pass
    def judgeProp(self,OperStr):#parse a "file attributes" requirement from the question text
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#first quoted name (source)
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#second quoted name (destination)
        print(dest)
## win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_HIDDEN)
## win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_NORMAL)
        pass
    def judgeOperFromList(self,OperStrList):#apply the matching operation for each sub-question
        for item in OperStrList:
            pass
    def getOperStrListFromFile(self,filename):#read the sub-questions from a file into a list
        pass
    def judgeOperFromStr(self,OperStr):#dispatch based on keywords in the question text
if OperStr.find("新建") !=-1:
print("进入新建操作")
self.judgeNew(OperStr)
print("结束新建操作")
if OperStr.find("删除") !=-1:
print("进入删除操作")
self.judgeDele(OperStr)
print("结束删除操作")
if OperStr.find("复制") !=-1:
print("进入复制操作")
self.judgeCopy(OperStr)
print("结束复制操作")
if OperStr.find("移动") !=-1:
print("进入移动操作")
self.judgeMov(OperStr)
print("结束移动操作")
if OperStr.find("改名") !=-1:
print("进入改名操作")
self.judgeRename(OperStr)
print("结束改名操作")
if OperStr.find("属性") !=-1:
print("进入属性操作")
self.judgeProp(OperStr)
print("结束属性操作")
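# --- Hedged sketch (not part of the original grader) -----------------------
# The WinOperation methods above only print what they would do. The sketch
# below illustrates how dele/mov/copy could be carried out with the standard
# library. It is an assumption for illustration only: the exam root directory
# and file names are taken from the sample questions, not from real data.
import shutil

def _demo_win_file_ops(root=r"d:\zrexam\windows"):
    new_dir = os.path.join(root, "蕨类植物", "薄囊蕨类")
    os.makedirs(new_dir, exist_ok=True)                 # create the new sub-folder
    src = os.path.join(root, "淡水藻.ddd")
    dst_dir = os.path.join(root, "藻类植物")
    if os.path.isfile(src) and os.path.isdir(dst_dir):
        shutil.move(src, dst_dir)                       # move the file into the folder
        # shutil.copy2(src, dst_dir) would copy instead of move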
'''
Word operation section:
Knowledge points covered by the exam:
1. Fonts
2. Paragraphs
3. Find and replace
4. Inserting tables, WordArt and pictures
5. Page margins and columns
Sample exam questions (kept in Chinese because the parsers match Chinese keywords):
1. 将标题“师恩难忘”设置为黑体,居中对齐。
2.将文中第二段(这个小学设在一座庙内……)设置为首行缩进2字符。
3.将文中所有的“田老师”替换为“田先生”。
4. 设置页边距为上下各2.5厘米(应用于整篇文档)。
5. 在正文下面的空白处插入艺术字,内容为“师恩难忘”(样式任选)。
'''
class WordOperation:
    def __init__(self, filename=None): #open the file, or start a new document if none is given
self.wordApp = win32com.client.Dispatch('Word.Application')
if filename:
self.filename = filename
else:
self.filename = ''
    def save(self, newfilename=None): #save the document (placeholder: no SaveAs call is made yet)
if newfilename:
self.filename = newfilename
else:
pass
    def close(self): #close Word
del self.wordApp
def fontOper(self):
pass
def replaceOper(self,source,dest):
pass
def insertOper(self,style):
pass
def pageOper(self):
pass
def paragraphOper(self):
pass
def judgePage(self,OperStr):
print('正在完成要求',OperStr)
def judgeFont(self,OperStr):
print('正在完成要求',OperStr)
def judgeReplace(self,OperStr):
print('正在完成要求',OperStr)
def judgeInsert(self,OperStr):
print('正在完成要求',OperStr)
def judgeParagraph(self,OperStr):
print('正在完成要求',OperStr)
    def judgeOperFromStr(self,OperStr):#dispatch based on keywords in the question text
if OperStr.find("标题") !=-1 or OperStr.find("黑体") !=-1 or OperStr.find("居中对齐") !=-1:
print("进入字体操作")
self.judgeFont(OperStr)
print("结束字体")
elif OperStr.find("首行缩进") !=-1 or OperStr.find("行距") !=-1:
print("进入段落操作")
self.judgeParagraph(OperStr)
print("结束段落操作")
elif OperStr.find("插入") !=-1:
print("进入插入操作")
self.judgeInsert(OperStr)
print("结束插入操作")
elif OperStr.find("页边距") !=-1:
print("进入页边距操作")
self.judgePage(OperStr)
print("结束页边距操作")
elif OperStr.find("分栏") !=-1:
print("进入分栏操作")
self.judgeFont(OperStr)
print("结束分栏操作")
elif OperStr.find("替换") !=-1:
print("进入替换操作")
self.judgeReplace(OperStr)
print("结束替换操作")
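# --- Hedged sketch: carrying out the "replace" requirement via Word COM ----
# WordOperation.replaceOper above is a stub. One common way to do a
# whole-document replacement with the Word object model is sketched below.
# The document path is an assumption, and 2 is the wdReplaceAll constant.
def _demo_word_replace(path=r"d:\zrexam\word\师恩难忘.docx"):
    app = win32com.client.Dispatch('Word.Application')
    doc = app.Documents.Open(path)
    # replace every 田老师 with 田先生 in the document body
    doc.Content.Find.Execute(FindText='田老师', ReplaceWith='田先生', Replace=2)
    doc.Save()
    doc.Close()
    app.Quit()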
'''
Excel operation section:
Knowledge points covered by the exam:
1. Row height and column width
2. Cell formatting
3. Formulas and functions
4. Sorting
5. Inserting charts
Sample exam questions (kept in Chinese because the parsers match Chinese keywords):
1.将A2所在行的行高设置为30(40像素)。
2.根据工作表中提供的公式,计算各班级的“3D社团参与比例”,并将结果填写在F3:F7单元格内。
3.给A2:F8单元格区域加所有框线。
4.按“无人机社团人数”由高到低排序。
5.选定A2:B7单元格区域,制作“三维折线图”,并插入到Sheet1工作表中。
'''
class ExcelOperation:
    def __init__(self, filename=None): #open the workbook, or create a new one if no filename is given
self.xlApp = win32com.client.Dispatch('Excel.Application')
if filename:
self.filename = filename
self.xlBook = self.xlApp.Workbooks.Open(filename)
else:
self.xlBook = self.xlApp.Workbooks.Add()
self.filename = ''
    def save(self, newfilename=None): #save the workbook (SaveAs when a new name is given)
if newfilename:
self.filename = newfilename
self.xlBook.SaveAs(newfilename)
else:
self.xlBook.Save()
    def close(self): #close the workbook without saving changes
self.xlBook.Close(SaveChanges=0)
del self.xlApp
    def getCell(self, sheet, row, col): #read the value of one cell
"Get value of one cell"
sht = self.xlBook.Worksheets(sheet)
return sht.Cells(row, col).Value
    def setCell(self, sheet, row, col, value): #write a value into one cell
"set value of one cell"
sht = self.xlBook.Worksheets(sheet)
sht.Cells(row, col).Value = value
    def setCellformat(self, sheet, row, col): #format one cell
        "Format one cell (font, fill, border, row height and alignment)"
        sht = self.xlBook.Worksheets(sheet) 
        sht.Cells(row, col).Font.Size = 15#font size
        sht.Cells(row, col).Font.Bold = True#bold
        sht.Cells(row, col).Font.Name = "Arial"#font name
        sht.Cells(row, col).Interior.ColorIndex = 3#cell fill colour
        #sht.Range("A1").Borders.LineStyle = xlDouble
        sht.Cells(row, col).BorderAround(1,4)#cell border
        sht.Rows(3).RowHeight = 30#row height
        sht.Cells(row, col).HorizontalAlignment = -4131 #horizontal alignment (-4131 is xlLeft; xlCenter would be -4108)
        sht.Cells(row, col).VerticalAlignment = -4160 #vertical alignment (-4160 is xlTop)
def rowHeightOper(self,sheet,row,height):
sht = self.xlBook.Worksheets(sheet)
sht.Rows(row).RowHeight = height
    def deleteRow(self, sheet, row):
        sht = self.xlBook.Worksheets(sheet)
        sht.Rows(row).Delete()#delete the row
        sht.Columns(row).Delete()#note: also deletes the column with the same index
    def getRange(self, sheet, row1, col1, row2, col2): #get the values of a region as a 2d tuple
"return a 2d array (i.e. tuple of tuples)"
sht = self.xlBook.Worksheets(sheet)
return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value
    def addPicture(self, sheet, pictureName, Left, Top, Width, Height): #insert a picture into the sheet
"Insert a picture in sheet"
sht = self.xlBook.Worksheets(sheet)
sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)
    def cpSheet(self, before): #copy a worksheet
"copy sheet"
shts = self.xlBook.Worksheets
shts(1).Copy(None,shts(1))
    def judgeRowHeight(self,OperStr):#row-height requirement
print('正在完成要求',OperStr)
def judgeColWidth(self,OperStr):
print('正在完成要求',OperStr)
def judgeFormula(self,OperStr):
print('正在完成要求',OperStr)
def judgeFunction(self,OperStr):
print('正在完成要求',OperStr)
def judgeSort(self,OperStr):
print('正在完成要求',OperStr)
def judgeChart(self,OperStr):
print('正在完成要求',OperStr)
def judgeBoxLine(self,OperStr):
print('正在完成要求',OperStr)
    def judgeOperFromStr(self,OperStr):#dispatch based on keywords in the question text
if OperStr.find("行高") !=-1:
print("进入行高操作")
self.judgeRowHeight(OperStr)
print("结束行高操作")
if OperStr.find("列宽") !=-1:
print("进入列宽操作")
self.judgeColWidth(OperStr)
print("结束列宽操作")
if OperStr.find("公式") !=-1:
print("进入公式操作")
self.judgeFormula(OperStr)
print("结束公式操作")
if OperStr.find("函数") !=-1:
print("进入函数操作")
self.judgeFunction(OperStr)
print("结束函数操作")
if OperStr.find("所有框线") !=-1:
print("进入所有框线操作")
self.judgeBoxLine(OperStr)
print("结束所有框线操作")
if OperStr.find("排序") !=-1:
print("进入排序操作")
self.judgeSort(OperStr)
print("结束排序操作")
if OperStr.find("图表") !=-1:
print("进入图表操作")
self.judgeChart(OperStr)
print("结束图表操作")
pass
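# --- Hedged sketch: parsing a row-height requirement ------------------------
# judgeRowHeight above only prints the requirement. A minimal way to pull the
# cell reference and the height out of text such as
# "将A2所在行的行高设置为30(40像素)" and apply it with rowHeightOper is shown
# below. Using sheet 1 and this exact phrasing are assumptions for the sketch.
def _demo_excel_row_height(xl, OperStr):
    m = re.search(r'([A-Z]+)(\d+)所在行的行高设置为(\d+)', OperStr)
    if m:
        row = int(m.group(2))          # e.g. 2 from "A2"
        height = int(m.group(3))       # e.g. 30
        xl.rowHeightOper(1, row, height)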
'''
PPT operation section:
Knowledge points covered by the exam:
1. Animation effects
2. Slide-transition effects
3. Hyperlinks
4. Backgrounds
5. Inserting pictures, sound and video
Sample exam questions (kept in Chinese because the parsers match Chinese keywords):
1.在第四张幻灯片的上方插入横排文本框,在文本框中输入“吃月饼”,字体黑体,字号32。
2.将第三张幻灯片的背景填充效果设置为纹理填充,纹理为“鱼类化石”。
3.设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。
4.给第四张幻灯片右侧的图片设置进入中的“劈裂”动画效果,效果选项为“中央向上下展开”。
5.给第三张幻灯片中的文字“赏桂花”添加超链接,使其链接到第五张幻灯片。
'''
class PptOperation:
def __init__(self):
pass
def AnimationOper(self):
pass
def SwitchOper(self):
pass
def InsertOper(self,style):
pass
def BackgroundOper(self):
pass
def HyperlinkOper(self):
pass
    def judgeAnimation(self,OperStr):
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#first quoted phrase in the requirement
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#second quoted phrase in the requirement
        print(dest)
    def judgeSwitch(self,OperStr):
        print('正在完成要求',OperStr)
        pattern = re.compile('“(.*)”')
        print (pattern.findall(OperStr))
        strFile=str(pattern.findall(OperStr))
        file1=strFile.split("”")
        source=file1[0][2:]#first quoted phrase in the requirement
        print(source)
        file2=strFile.split("“")
        dest=file2[1][0:-2]#second quoted phrase in the requirement
        print(dest)
def judgeInsert(self,OperStr):
print('正在完成要求',OperStr)
def judgeBackground(self,OperStr):
print('正在完成要求',OperStr)
def judgeHyperlink(self,OperStr):
print('正在完成要求',OperStr)
    def judgeOperFromStr(self,OperStr):#dispatch based on keywords in the question text
if OperStr.find("动画") !=-1:
print("进入动画操作")
self.judgeAnimation(OperStr)
print("结束动画操作")
if OperStr.find("切换") !=-1:
print("进入切换操作")
self.judgeSwitch(OperStr)
print("结束切换操作")
if OperStr.find("超级链接") !=-1:
print("进入超级链接操作")
self.judgeHyperlink(OperStr)
print("结束超级链接操作")
if OperStr.find("背景") !=-1:
print("进入背景操作")
self.judgeBackground(OperStr)
print("结束背景操作")
if OperStr.find("插入") !=-1:
print("进入插入操作")
self.judgeInsert(OperStr)
print("结束插入操作")
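# --- Hedged sketch: opening a presentation via PowerPoint COM ---------------
# The Ppt judge* methods above only parse the requirement text. Opening the
# file and walking its slides, as an automated checker would need to, can be
# done roughly as follows. The file path is an assumption, and the
# transition/animation details are omitted because they depend on MSO enums.
def _demo_ppt_open(path=r"d:\zrexam\ppt\中秋.pptx"):
    app = win32com.client.Dispatch('PowerPoint.Application')
    pres = app.Presentations.Open(path)
    print('slide count:', pres.Slides.Count)
    for slide in pres.Slides:
        print(slide.SlideIndex, slide.Shapes.Count)
    pres.Close()
    app.Quit()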
'''
Text-entry (typing) operation section:
Knowledge points covered by the exam:
Demonstration of COM object calls:
class InputOperation:
'''
class OperationTypeJudge:
def __init__(self):
pass
def getType(self,OperStr):
if OperStr.find("替换") !=-1 or OperStr.find("首行缩进") !=-1:
print('这是word题要求')
print('已转word题处理')
elif OperStr.find("公式") !=-1 or OperStr.find("函数") !=-1:
print('这是excel题要求')
print('已转excel题处理')
elif OperStr.find("切换") !=-1 or OperStr.find("动画") !=-1:
print('这是ppt题要求')
print('已转ppt题处理')
pass
def getOperaPath(self):
pass
def getOperaFileName(self):
pass
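# --- Hedged sketch: routing a requirement to the right handler --------------
# getType above only prints which subject a requirement belongs to. A small
# dispatcher that actually forwards the text to the classes defined in this
# module might look like this (the Excel file path is an assumption).
def _demo_dispatch(OperStr):
    if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:
        WordOperation().judgeOperFromStr(OperStr)
    elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:
        ExcelOperation(r'c:/test.xls').judgeOperFromStr(OperStr)
    elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:
        PptOperation().judgeOperFromStr(OperStr)
    else:
        WinOperation().judgeOperFromStr(OperStr)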
'''
Multiple-choice question section:
'''
class SelectOperation:
def __init__(self):
pass
def getQusetionTxt(self,item):
pass
def getQusetionPic(self,item):
pass
def getAnswer(self,item):
pass
def getCorrectAnswer(self,item):
pass
'''
True/false (judgement) question section:
'''
class JudgeOperation:
def __init__(self):
pass
def getQusetionTxt(self,item):
pass
def getQusetionPic(self,item):
pass
def getAnswer(self,item):
pass
def getCorrectAnswer(self,item):
pass
if __name__ == "__main__":
win=WinOperation()
win.judgeOperFromStr('1、在“蕨类植物”文件夹中,新建一个子文件夹“薄囊蕨类”。')
win.judgeOperFromStr('2、将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。')
win.judgeOperFromStr('3、设置“螺旋藻.aaa”文件属性为“只读”。')
win.judgeOperFromStr('4、在桌面上建立“绿色植物”的快捷方式。')
word=WordOperation()
word.judgeOperFromStr('1. 将标题“师恩难忘”设置为黑体,居中对齐。')
word.judgeOperFromStr('2.将文中第二段(这个小学设在一座庙内……)设置为首行缩进2字符。')
word.judgeOperFromStr('3.将文中所有的“田老师”替换为“田先生”。')
word.judgeOperFromStr('4. 设置页边距为上下各2.5厘米(应用于整篇文档)。')
word.judgeOperFromStr('5. 在正文下面的空白处插入艺术字,内容为“师恩难忘”(样式任选)。')
excel=ExcelOperation(r'c:/test.xls')
excel.judgeOperFromStr('1.将A2所在行的行高设置为30(40像素)。')
excel.judgeOperFromStr('2.根据工作表中提供的公式,计算各班级的“3D社团参与比例”,并将结果填写在F3:F7单元格内。')
excel.judgeOperFromStr('3.给A2:F8单元格区域加所有框线。')
excel.judgeOperFromStr('4.按“无人机社团人数”由高到低排序。')
excel.judgeOperFromStr('5.选定A2:B7单元格区域,制作“三维折线图”,并插入到Sheet1工作表中。')
ppt=PptOperation()
ppt.judgeOperFromStr('1.在第四张幻灯片的上方插入横排文本框,在文本框中输入“吃月饼”,字体黑体,字号32。')
ppt.judgeOperFromStr('2.将第三张幻灯片的背景填充效果设置为纹理填充,纹理为“鱼类化石”。')
ppt.judgeOperFromStr('3.设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。')
ppt.judgeOperFromStr('4.给第四张幻灯片右侧的图片设置进入中的“劈裂”动画效果,效果选项为“中央向上下展开”。')
ppt.judgeOperFromStr('5.给第三张幻灯片中的文字“赏桂花”添加超链接,使其链接到第五张幻灯片。')
|
flexible
|
{
"blob_id": "b453006b4d4c5f17bb58110fe8197d7796ca0c6c",
"index": 467,
"step-1": "<mask token>\n\n\nclass ExcelOperation:\n\n def __init__(self, filename=None):\n self.xlApp = win32com.client.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlBook = self.xlApp.Workbooks.Open(filename)\n else:\n self.xlBook = self.xlApp.Workbooks.Add()\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n self.xlBook.SaveAs(newfilename)\n else:\n self.xlBook.Save()\n\n def close(self):\n self.xlBook.Close(SaveChanges=0)\n del self.xlApp\n <mask token>\n\n def setCell(self, sheet, row, col, value):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Value = value\n\n def setCellformat(self, sheet, row, col):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Font.Size = 15\n sht.Cells(row, col).Font.Bold = True\n sht.Cells(row, col).Font.Name = 'Arial'\n sht.Cells(row, col).Interior.ColorIndex = 3\n sht.Cells(row, col).BorderAround(1, 4)\n sht.Rows(3).RowHeight = 30\n sht.Cells(row, col).HorizontalAlignment = -4131\n sht.Cells(row, col).VerticalAlignment = -4160\n <mask token>\n\n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()\n sht.Columns(row).Delete()\n <mask token>\n\n def addPicture(self, sheet, pictureName, Left, Top, Width, Height):\n \"\"\"Insert a picture in sheet\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n\n def cpSheet(self, before):\n \"\"\"copy sheet\"\"\"\n shts = self.xlBook.Worksheets\n shts(1).Copy(None, shts(1))\n\n def judgeRowHeight(self, OperStr):\n print('正在完成要求', OperStr)\n <mask token>\n <mask token>\n <mask token>\n\n def judgeSort(self, OperStr):\n print('正在完成要求', OperStr)\n <mask token>\n <mask token>\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('行高') != -1:\n print('进入行高操作')\n self.judgeRowHeight(OperStr)\n print('结束行高操作')\n if OperStr.find('列宽') != -1:\n print('进入列宽操作')\n self.judgeColWidth(OperStr)\n print('结束列宽操作')\n if OperStr.find('公式') != -1:\n print('进入公式操作')\n self.judgeFormula(OperStr)\n print('结束公式操作')\n if OperStr.find('函数') != -1:\n print('进入函数操作')\n self.judgeFunction(OperStr)\n print('结束函数操作')\n if OperStr.find('所有框线') != -1:\n print('进入所有框线操作')\n self.judgeBoxLine(OperStr)\n print('结束所有框线操作')\n if OperStr.find('排序') != -1:\n print('进入排序操作')\n self.judgeSort(OperStr)\n print('结束排序操作')\n if OperStr.find('图表') != -1:\n print('进入图表操作')\n self.judgeChart(OperStr)\n print('结束图表操作')\n pass\n\n\n<mask token>\n\n\nclass PptOperation:\n\n def __init__(self):\n pass\n\n def AnimationOper(self):\n pass\n\n def SwitchOper(self):\n pass\n\n def InsertOper(self, style):\n pass\n\n def BackgroundOper(self):\n pass\n\n def HyperlinkOper(self):\n pass\n\n def judgeAnimation(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeSwitch(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBackground(self, 
OperStr):\n print('正在完成要求', OperStr)\n\n def judgeHyperlink(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('动画') != -1:\n print('进入动画操作')\n self.judgeAnimation(OperStr)\n print('结束动画操作')\n if OperStr.find('切换') != -1:\n print('进入切换操作')\n self.judgeSwitch(OperStr)\n print('结束切换操作')\n if OperStr.find('超级链接') != -1:\n print('进入超级链接操作')\n self.judgeHyperlink(OperStr)\n print('结束超级链接操作')\n if OperStr.find('背景') != -1:\n print('进入背景操作')\n self.judgeBackground(OperStr)\n print('结束背景操作')\n if OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n\n\n<mask token>\n\n\nclass OperationTypeJudge:\n\n def __init__(self):\n pass\n\n def getType(self, OperStr):\n if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:\n print('这是excel题要求')\n print('已转excel题处理')\n elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n\n def getOperaPath(self):\n pass\n\n def getOperaFileName(self):\n pass\n\n\n<mask token>\n\n\nclass SelectOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n\n\nclass JudgeOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass WordOperation:\n\n def __init__(self, filename=None):\n self.wordApp = win32com.client.Dispatch('Word.Application')\n if filename:\n self.filename = filename\n else:\n self.filename = ''\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass ExcelOperation:\n\n def __init__(self, filename=None):\n self.xlApp = win32com.client.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlBook = self.xlApp.Workbooks.Open(filename)\n else:\n self.xlBook = self.xlApp.Workbooks.Add()\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n self.xlBook.SaveAs(newfilename)\n else:\n self.xlBook.Save()\n\n def close(self):\n self.xlBook.Close(SaveChanges=0)\n del self.xlApp\n\n def getCell(self, sheet, row, col):\n \"\"\"Get value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Cells(row, col).Value\n\n def setCell(self, sheet, row, col, value):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Value = value\n\n def setCellformat(self, sheet, row, col):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Font.Size = 15\n sht.Cells(row, col).Font.Bold = True\n sht.Cells(row, col).Font.Name = 'Arial'\n sht.Cells(row, col).Interior.ColorIndex = 3\n sht.Cells(row, col).BorderAround(1, 4)\n sht.Rows(3).RowHeight = 30\n sht.Cells(row, col).HorizontalAlignment = -4131\n sht.Cells(row, col).VerticalAlignment = -4160\n\n def rowHeightOper(self, sheet, row, height):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).RowHeight = height\n\n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()\n sht.Columns(row).Delete()\n\n def getRange(self, sheet, row1, col1, row2, col2):\n \"\"\"return a 2d array (i.e. 
tuple of tuples)\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value\n\n def addPicture(self, sheet, pictureName, Left, Top, Width, Height):\n \"\"\"Insert a picture in sheet\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n\n def cpSheet(self, before):\n \"\"\"copy sheet\"\"\"\n shts = self.xlBook.Worksheets\n shts(1).Copy(None, shts(1))\n\n def judgeRowHeight(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeColWidth(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFormula(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFunction(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeSort(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeChart(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBoxLine(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('行高') != -1:\n print('进入行高操作')\n self.judgeRowHeight(OperStr)\n print('结束行高操作')\n if OperStr.find('列宽') != -1:\n print('进入列宽操作')\n self.judgeColWidth(OperStr)\n print('结束列宽操作')\n if OperStr.find('公式') != -1:\n print('进入公式操作')\n self.judgeFormula(OperStr)\n print('结束公式操作')\n if OperStr.find('函数') != -1:\n print('进入函数操作')\n self.judgeFunction(OperStr)\n print('结束函数操作')\n if OperStr.find('所有框线') != -1:\n print('进入所有框线操作')\n self.judgeBoxLine(OperStr)\n print('结束所有框线操作')\n if OperStr.find('排序') != -1:\n print('进入排序操作')\n self.judgeSort(OperStr)\n print('结束排序操作')\n if OperStr.find('图表') != -1:\n print('进入图表操作')\n self.judgeChart(OperStr)\n print('结束图表操作')\n pass\n\n\n<mask token>\n\n\nclass PptOperation:\n\n def __init__(self):\n pass\n\n def AnimationOper(self):\n pass\n\n def SwitchOper(self):\n pass\n\n def InsertOper(self, style):\n pass\n\n def BackgroundOper(self):\n pass\n\n def HyperlinkOper(self):\n pass\n\n def judgeAnimation(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeSwitch(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBackground(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeHyperlink(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('动画') != -1:\n print('进入动画操作')\n self.judgeAnimation(OperStr)\n print('结束动画操作')\n if OperStr.find('切换') != -1:\n print('进入切换操作')\n self.judgeSwitch(OperStr)\n print('结束切换操作')\n if OperStr.find('超级链接') != -1:\n print('进入超级链接操作')\n self.judgeHyperlink(OperStr)\n print('结束超级链接操作')\n if OperStr.find('背景') != -1:\n print('进入背景操作')\n self.judgeBackground(OperStr)\n print('结束背景操作')\n if OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n\n\n<mask token>\n\n\nclass OperationTypeJudge:\n\n def __init__(self):\n pass\n\n def getType(self, OperStr):\n if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:\n print('这是excel题要求')\n 
print('已转excel题处理')\n elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n\n def getOperaPath(self):\n pass\n\n def getOperaFileName(self):\n pass\n\n\n<mask token>\n\n\nclass SelectOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n\n\nclass JudgeOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass WordOperation:\n\n def __init__(self, filename=None):\n self.wordApp = win32com.client.Dispatch('Word.Application')\n if filename:\n self.filename = filename\n else:\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n else:\n pass\n\n def close(self):\n del self.wordApp\n\n def fontOper(self):\n pass\n\n def replaceOper(self, source, dest):\n pass\n\n def insertOper(self, style):\n pass\n\n def pageOper(self):\n pass\n\n def paragraphOper(self):\n pass\n\n def judgePage(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFont(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeReplace(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeParagraph(self, OperStr):\n print('正在完成要求', OperStr)\n <mask token>\n\n\n<mask token>\n\n\nclass ExcelOperation:\n\n def __init__(self, filename=None):\n self.xlApp = win32com.client.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlBook = self.xlApp.Workbooks.Open(filename)\n else:\n self.xlBook = self.xlApp.Workbooks.Add()\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n self.xlBook.SaveAs(newfilename)\n else:\n self.xlBook.Save()\n\n def close(self):\n self.xlBook.Close(SaveChanges=0)\n del self.xlApp\n\n def getCell(self, sheet, row, col):\n \"\"\"Get value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Cells(row, col).Value\n\n def setCell(self, sheet, row, col, value):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Value = value\n\n def setCellformat(self, sheet, row, col):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Font.Size = 15\n sht.Cells(row, col).Font.Bold = True\n sht.Cells(row, col).Font.Name = 'Arial'\n sht.Cells(row, col).Interior.ColorIndex = 3\n sht.Cells(row, col).BorderAround(1, 4)\n sht.Rows(3).RowHeight = 30\n sht.Cells(row, col).HorizontalAlignment = -4131\n sht.Cells(row, col).VerticalAlignment = -4160\n\n def rowHeightOper(self, sheet, row, height):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).RowHeight = height\n\n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()\n sht.Columns(row).Delete()\n\n def getRange(self, sheet, row1, col1, row2, col2):\n \"\"\"return a 2d array (i.e. 
tuple of tuples)\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value\n\n def addPicture(self, sheet, pictureName, Left, Top, Width, Height):\n \"\"\"Insert a picture in sheet\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n\n def cpSheet(self, before):\n \"\"\"copy sheet\"\"\"\n shts = self.xlBook.Worksheets\n shts(1).Copy(None, shts(1))\n\n def judgeRowHeight(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeColWidth(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFormula(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFunction(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeSort(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeChart(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBoxLine(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('行高') != -1:\n print('进入行高操作')\n self.judgeRowHeight(OperStr)\n print('结束行高操作')\n if OperStr.find('列宽') != -1:\n print('进入列宽操作')\n self.judgeColWidth(OperStr)\n print('结束列宽操作')\n if OperStr.find('公式') != -1:\n print('进入公式操作')\n self.judgeFormula(OperStr)\n print('结束公式操作')\n if OperStr.find('函数') != -1:\n print('进入函数操作')\n self.judgeFunction(OperStr)\n print('结束函数操作')\n if OperStr.find('所有框线') != -1:\n print('进入所有框线操作')\n self.judgeBoxLine(OperStr)\n print('结束所有框线操作')\n if OperStr.find('排序') != -1:\n print('进入排序操作')\n self.judgeSort(OperStr)\n print('结束排序操作')\n if OperStr.find('图表') != -1:\n print('进入图表操作')\n self.judgeChart(OperStr)\n print('结束图表操作')\n pass\n\n\n<mask token>\n\n\nclass PptOperation:\n\n def __init__(self):\n pass\n\n def AnimationOper(self):\n pass\n\n def SwitchOper(self):\n pass\n\n def InsertOper(self, style):\n pass\n\n def BackgroundOper(self):\n pass\n\n def HyperlinkOper(self):\n pass\n\n def judgeAnimation(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeSwitch(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBackground(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeHyperlink(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('动画') != -1:\n print('进入动画操作')\n self.judgeAnimation(OperStr)\n print('结束动画操作')\n if OperStr.find('切换') != -1:\n print('进入切换操作')\n self.judgeSwitch(OperStr)\n print('结束切换操作')\n if OperStr.find('超级链接') != -1:\n print('进入超级链接操作')\n self.judgeHyperlink(OperStr)\n print('结束超级链接操作')\n if OperStr.find('背景') != -1:\n print('进入背景操作')\n self.judgeBackground(OperStr)\n print('结束背景操作')\n if OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n\n\n<mask token>\n\n\nclass OperationTypeJudge:\n\n def __init__(self):\n pass\n\n def getType(self, OperStr):\n if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:\n print('这是excel题要求')\n 
print('已转excel题处理')\n elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n\n def getOperaPath(self):\n pass\n\n def getOperaFileName(self):\n pass\n\n\n<mask token>\n\n\nclass SelectOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n\n\nclass JudgeOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass WinOperation:\n <mask token>\n <mask token>\n\n def rename(self, sourceFilename, destFilename):\n print(sourceFilename, '文件改名为', destFilename)\n pass\n\n def mov(self, sourceFilename, destFilename):\n print(sourceFilename, '移动文件为', destFilename)\n pass\n <mask token>\n <mask token>\n\n def realSourceFilename(self, soucePath, sourceFilename):\n return sourceFilename\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def judgeCopy(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n pass\n <mask token>\n\n def judgeOperFromList(self, OperStrList):\n for item in OperStrList:\n pass\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass WordOperation:\n\n def __init__(self, filename=None):\n self.wordApp = win32com.client.Dispatch('Word.Application')\n if filename:\n self.filename = filename\n else:\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n else:\n pass\n\n def close(self):\n del self.wordApp\n\n def fontOper(self):\n pass\n\n def replaceOper(self, source, dest):\n pass\n\n def insertOper(self, style):\n pass\n\n def pageOper(self):\n pass\n\n def paragraphOper(self):\n pass\n\n def judgePage(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFont(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeReplace(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeParagraph(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('标题') != -1 or OperStr.find('黑体'\n ) != -1 or OperStr.find('居中对齐') != -1:\n print('进入字体操作')\n self.judgeFont(OperStr)\n print('结束字体')\n elif OperStr.find('首行缩进') != -1 or OperStr.find('行距') != -1:\n print('进入段落操作')\n self.judgeParagraph(OperStr)\n print('结束段落操作')\n elif OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n elif OperStr.find('页边距') != -1:\n print('进入页边距操作')\n self.judgePage(OperStr)\n print('结束页边距操作')\n elif OperStr.find('分栏') != -1:\n print('进入分栏操作')\n self.judgeFont(OperStr)\n print('结束分栏操作')\n elif OperStr.find('替换') != -1:\n print('进入替换操作')\n self.judgeReplace(OperStr)\n print('结束替换操作')\n\n\n<mask token>\n\n\nclass ExcelOperation:\n\n def __init__(self, filename=None):\n self.xlApp = win32com.client.Dispatch('Excel.Application')\n if filename:\n self.filename = filename\n self.xlBook = self.xlApp.Workbooks.Open(filename)\n else:\n self.xlBook = self.xlApp.Workbooks.Add()\n self.filename = ''\n\n def save(self, newfilename=None):\n if newfilename:\n self.filename = newfilename\n self.xlBook.SaveAs(newfilename)\n else:\n self.xlBook.Save()\n\n def close(self):\n self.xlBook.Close(SaveChanges=0)\n del self.xlApp\n\n def getCell(self, sheet, row, col):\n \"\"\"Get value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Cells(row, col).Value\n\n def setCell(self, sheet, row, col, value):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Value = value\n\n def setCellformat(self, sheet, row, col):\n \"\"\"set value of one cell\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Cells(row, col).Font.Size = 15\n sht.Cells(row, col).Font.Bold = True\n sht.Cells(row, col).Font.Name = 
'Arial'\n sht.Cells(row, col).Interior.ColorIndex = 3\n sht.Cells(row, col).BorderAround(1, 4)\n sht.Rows(3).RowHeight = 30\n sht.Cells(row, col).HorizontalAlignment = -4131\n sht.Cells(row, col).VerticalAlignment = -4160\n\n def rowHeightOper(self, sheet, row, height):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).RowHeight = height\n\n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()\n sht.Columns(row).Delete()\n\n def getRange(self, sheet, row1, col1, row2, col2):\n \"\"\"return a 2d array (i.e. tuple of tuples)\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value\n\n def addPicture(self, sheet, pictureName, Left, Top, Width, Height):\n \"\"\"Insert a picture in sheet\"\"\"\n sht = self.xlBook.Worksheets(sheet)\n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n\n def cpSheet(self, before):\n \"\"\"copy sheet\"\"\"\n shts = self.xlBook.Worksheets\n shts(1).Copy(None, shts(1))\n\n def judgeRowHeight(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeColWidth(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFormula(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeFunction(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeSort(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeChart(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBoxLine(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('行高') != -1:\n print('进入行高操作')\n self.judgeRowHeight(OperStr)\n print('结束行高操作')\n if OperStr.find('列宽') != -1:\n print('进入列宽操作')\n self.judgeColWidth(OperStr)\n print('结束列宽操作')\n if OperStr.find('公式') != -1:\n print('进入公式操作')\n self.judgeFormula(OperStr)\n print('结束公式操作')\n if OperStr.find('函数') != -1:\n print('进入函数操作')\n self.judgeFunction(OperStr)\n print('结束函数操作')\n if OperStr.find('所有框线') != -1:\n print('进入所有框线操作')\n self.judgeBoxLine(OperStr)\n print('结束所有框线操作')\n if OperStr.find('排序') != -1:\n print('进入排序操作')\n self.judgeSort(OperStr)\n print('结束排序操作')\n if OperStr.find('图表') != -1:\n print('进入图表操作')\n self.judgeChart(OperStr)\n print('结束图表操作')\n pass\n\n\n<mask token>\n\n\nclass PptOperation:\n\n def __init__(self):\n pass\n\n def AnimationOper(self):\n pass\n\n def SwitchOper(self):\n pass\n\n def InsertOper(self, style):\n pass\n\n def BackgroundOper(self):\n pass\n\n def HyperlinkOper(self):\n pass\n\n def judgeAnimation(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeSwitch(self, OperStr):\n print('正在完成要求', OperStr)\n pattern = re.compile('“(.*)”')\n print(pattern.findall(OperStr))\n strFile = str(pattern.findall(OperStr))\n file1 = strFile.split('”')\n source = file1[0][2:]\n print(source)\n file2 = strFile.split('“')\n dest = file2[1][0:-2]\n print(dest)\n\n def judgeInsert(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeBackground(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeHyperlink(self, OperStr):\n print('正在完成要求', OperStr)\n\n def judgeOperFromStr(self, OperStr):\n if OperStr.find('动画') != -1:\n print('进入动画操作')\n self.judgeAnimation(OperStr)\n print('结束动画操作')\n if OperStr.find('切换') != -1:\n print('进入切换操作')\n self.judgeSwitch(OperStr)\n print('结束切换操作')\n if OperStr.find('超级链接') != -1:\n 
print('进入超级链接操作')\n self.judgeHyperlink(OperStr)\n print('结束超级链接操作')\n if OperStr.find('背景') != -1:\n print('进入背景操作')\n self.judgeBackground(OperStr)\n print('结束背景操作')\n if OperStr.find('插入') != -1:\n print('进入插入操作')\n self.judgeInsert(OperStr)\n print('结束插入操作')\n\n\n<mask token>\n\n\nclass OperationTypeJudge:\n\n def __init__(self):\n pass\n\n def getType(self, OperStr):\n if OperStr.find('替换') != -1 or OperStr.find('首行缩进') != -1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find('公式') != -1 or OperStr.find('函数') != -1:\n print('这是excel题要求')\n print('已转excel题处理')\n elif OperStr.find('切换') != -1 or OperStr.find('动画') != -1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n\n def getOperaPath(self):\n pass\n\n def getOperaFileName(self):\n pass\n\n\n<mask token>\n\n\nclass SelectOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n\n\nclass JudgeOperation:\n\n def __init__(self):\n pass\n\n def getQusetionTxt(self, item):\n pass\n\n def getQusetionPic(self, item):\n pass\n\n def getAnswer(self, item):\n pass\n\n def getCorrectAnswer(self, item):\n pass\n\n\n<mask token>\n",
"step-5": "# -*- coding: utf-8 -*-\n__author__ = 'tqs'\nfrom win32com.client import Dispatch \nimport win32com.client \nimport time\nimport os\nimport re\nimport win32api\n'''\nwindows操作部分说明:\n考试波及知识点:\n1.删除文件及文件夹\n2.复制文件及文件夹\n3.移动文件及文件夹\n4.文件及文件夹改名\n5.文件属性\n考试样例:\n1、在“蕨类植物”文件夹中,新建一个子文件夹“薄囊蕨类”。\n2、将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。\n3、设置“螺旋藻.aaa”文件属性为“只读”。\n4、在桌面上建立“绿色植物”的快捷方式。\n'''\nclass WinOperation:\n def __init__(self):\n self.soucePath = ''\n self.destPath = ''\n self.destFilename = ''\n self.sourceFilename = ''\n def dele(self,destFilename):#删除文件及文件夹\n print('删除文件',destFilename)\n pass\n def rename(self,sourceFilename,destFilename):#文件改名\n print(sourceFilename,'文件改名为',destFilename)\n pass\n def mov(self,sourceFilename,destFilename):#移动文件\n print(sourceFilename,'移动文件为',destFilename)\n pass\n def copy(self,sourceFilename,destFilename):#复制文件\n print(sourceFilename,'移动文件为',destFilename)\n pass\n def prop(self,destFilename):#文件属性\n print('文件属性',destFilename)\n pass\n def realSourceFilename(self,soucePath,sourceFilename):\n return sourceFilename\n def realdestFilename(self,destPath,destFilename):\n return destFilename\n def judgeNew(self,OperStr):#从文本中判断新建文件或文件夹\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n pass\n def judgeDele(self,OperStr):#从文本中判断删除文件\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n pass\n def judgeRename(self,OperStr):#从文本中判断重命名文件\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n pass\n def judgeMov(self,OperStr):#从文本中判断移动文件\n #形如将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。这种结构的解析\n #解析为源文件,目标文件\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n #需要再取得完整路径,需要查找\n sourceFilename=self.realSourceFilename(\"d:\\zrexam\\windows\",source)\n destFilename=self.realdestFilename(\"d:\\zrexam\\windows\",dest)\n self.mov(sourceFilename,destFilename)\n def judgeCopy(self,OperStr):\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n pass\n def judgeProp(self,OperStr):\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n## win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_HIDDEN)\n## win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_NORMAL)\n pass\n def judgeOperFromList(self,OperStrList):#根据各小题选择对应的操作\n for item in OperStrList:\n pass\n def getOperStrListFromFile(self,filename):#从文件中将各小题放入列表 \n pass\n def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作\n if OperStr.find(\"新建\") !=-1:\n print(\"进入新建操作\")\n 
self.judgeNew(OperStr)\n print(\"结束新建操作\")\n if OperStr.find(\"删除\") !=-1:\n print(\"进入删除操作\")\n self.judgeDele(OperStr)\n print(\"结束删除操作\")\n if OperStr.find(\"复制\") !=-1:\n print(\"进入复制操作\")\n self.judgeCopy(OperStr)\n print(\"结束复制操作\")\n if OperStr.find(\"移动\") !=-1:\n print(\"进入移动操作\")\n self.judgeMov(OperStr)\n print(\"结束移动操作\")\n if OperStr.find(\"改名\") !=-1:\n print(\"进入改名操作\")\n self.judgeRename(OperStr)\n print(\"结束改名操作\")\n if OperStr.find(\"属性\") !=-1:\n print(\"进入属性操作\")\n self.judgeProp(OperStr)\n print(\"结束属性操作\")\n \n'''\nword操作部分说明:\n考试波及知识点:\n1.字体\n2.段落\n3.查找替换\n4.插入 表格,艺术字,图片\n5.页边距,分栏\n\n1. 将标题“师恩难忘”设置为黑体,居中对齐。\n2.将文中第二段(这个小学设在一座庙内……)设置为首行缩进2字符。\n3.将文中所有的“田老师”替换为“田先生”。\n4. 设置页边距为上下各2.5厘米(应用于整篇文档)。\n5. 在正文下面的空白处插入艺术字,内容为“师恩难忘”(样式任选)。\n考试样例:\n'''\nclass WordOperation:\n def __init__(self, filename=None): #打开文件或者新建文件(如果不存在的话)\n self.wordApp = win32com.client.Dispatch('Word.Application') \n if filename: \n self.filename = filename\n else:\n self.filename = ''\n def save(self, newfilename=None): #保存文件\n if newfilename: \n self.filename = newfilename\n else:\n pass \n def close(self): #关闭文件\n del self.wordApp \n def fontOper(self): \n pass\n def replaceOper(self,source,dest):\n pass\n def insertOper(self,style):\n pass\n def pageOper(self):\n pass\n def paragraphOper(self):\n pass\n def judgePage(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeFont(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeReplace(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeInsert(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeParagraph(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作\n if OperStr.find(\"标题\") !=-1 or OperStr.find(\"黑体\") !=-1 or OperStr.find(\"居中对齐\") !=-1:\n print(\"进入字体操作\")\n self.judgeFont(OperStr)\n print(\"结束字体\")\n elif OperStr.find(\"首行缩进\") !=-1 or OperStr.find(\"行距\") !=-1:\n print(\"进入段落操作\")\n self.judgeParagraph(OperStr) \n print(\"结束段落操作\")\n elif OperStr.find(\"插入\") !=-1:\n print(\"进入插入操作\")\n self.judgeInsert(OperStr)\n print(\"结束插入操作\")\n elif OperStr.find(\"页边距\") !=-1:\n print(\"进入页边距操作\")\n self.judgePage(OperStr)\n print(\"结束页边距操作\")\n elif OperStr.find(\"分栏\") !=-1:\n print(\"进入分栏操作\")\n self.judgeFont(OperStr)\n print(\"结束分栏操作\")\n elif OperStr.find(\"替换\") !=-1:\n print(\"进入替换操作\")\n self.judgeReplace(OperStr)\n print(\"结束替换操作\")\n \n'''\nExcel操作部分说明:\n考试波及知识点:\n1.行高列宽\n2.格式相关\n3.公式函数\n4.排序\n5.插入图表\n\n考试样例:\n1.将A2所在行的行高设置为30(40像素)。\n2.根据工作表中提供的公式,计算各班级的“3D社团参与比例”,并将结果填写在F3:F7单元格内。\n3.给A2:F8单元格区域加所有框线。\n4.按“无人机社团人数”由高到低排序。\n5.选定A2:B7单元格区域,制作“三维折线图”,并插入到Sheet1工作表中。\n\n'''\nclass ExcelOperation:\n def __init__(self, filename=None): #打开文件或者新建文件(如果不存在的话)\n self.xlApp = win32com.client.Dispatch('Excel.Application') \n if filename: \n self.filename = filename \n self.xlBook = self.xlApp.Workbooks.Open(filename) \n else: \n self.xlBook = self.xlApp.Workbooks.Add() \n self.filename = ''\n def save(self, newfilename=None): #保存文件\n if newfilename: \n self.filename = newfilename \n self.xlBook.SaveAs(newfilename) \n else: \n self.xlBook.Save() \n def close(self): #关闭文件\n self.xlBook.Close(SaveChanges=0) \n del self.xlApp \n def getCell(self, sheet, row, col): #获取单元格的数据\n \"Get value of one cell\" \n sht = self.xlBook.Worksheets(sheet) \n return sht.Cells(row, col).Value \n def setCell(self, sheet, row, col, value): #设置单元格的数据\n \"set value of one cell\" \n sht = self.xlBook.Worksheets(sheet) \n sht.Cells(row, col).Value = value\n def setCellformat(self, sheet, row, col): #设置单元格的数据\n \"set value of one cell\" \n 
sht = self.xlBook.Worksheets(sheet) \n sht.Cells(row, col).Font.Size = 15#字体大小\n sht.Cells(row, col).Font.Bold = True#是否黑体\n sht.Cells(row, col).Font.Name = \"Arial\"#字体类型\n sht.Cells(row, col).Interior.ColorIndex = 3#表格背景\n #sht.Range(\"A1\").Borders.LineStyle = xlDouble\n sht.Cells(row, col).BorderAround(1,4)#表格边框\n sht.Rows(3).RowHeight = 30#行高\n sht.Cells(row, col).HorizontalAlignment = -4131 #水平居中xlCenter\n sht.Cells(row, col).VerticalAlignment = -4160 #\n def rowHeightOper(self,sheet,row,height):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).RowHeight = height \n def deleteRow(self, sheet, row):\n sht = self.xlBook.Worksheets(sheet)\n sht.Rows(row).Delete()#删除行\n sht.Columns(row).Delete()#删除列\n def getRange(self, sheet, row1, col1, row2, col2): #获得一块区域的数据,返回为一个二维元组\n \"return a 2d array (i.e. tuple of tuples)\" \n sht = self.xlBook.Worksheets(sheet)\n return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value \n def addPicture(self, sheet, pictureName, Left, Top, Width, Height): #插入图片\n \"Insert a picture in sheet\" \n sht = self.xlBook.Worksheets(sheet) \n sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)\n def cpSheet(self, before): #复制工作表\n \"copy sheet\" \n shts = self.xlBook.Worksheets \n shts(1).Copy(None,shts(1))\n def judgeRowHeight(self,OperStr):#行高操作\n print('正在完成要求',OperStr)\n def judgeColWidth(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeFormula(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeFunction(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeSort(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeChart(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeBoxLine(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作\n if OperStr.find(\"行高\") !=-1:\n print(\"进入行高操作\")\n self.judgeRowHeight(OperStr)\n print(\"结束行高操作\")\n if OperStr.find(\"列宽\") !=-1:\n print(\"进入列宽操作\")\n self.judgeColWidth(OperStr)\n print(\"结束列宽操作\")\n if OperStr.find(\"公式\") !=-1:\n print(\"进入公式操作\")\n self.judgeFormula(OperStr)\n print(\"结束公式操作\")\n if OperStr.find(\"函数\") !=-1:\n print(\"进入函数操作\")\n self.judgeFunction(OperStr)\n print(\"结束函数操作\")\n if OperStr.find(\"所有框线\") !=-1:\n print(\"进入所有框线操作\")\n self.judgeBoxLine(OperStr)\n print(\"结束所有框线操作\")\n if OperStr.find(\"排序\") !=-1:\n print(\"进入排序操作\")\n self.judgeSort(OperStr)\n print(\"结束排序操作\")\n if OperStr.find(\"图表\") !=-1:\n print(\"进入图表操作\")\n self.judgeChart(OperStr)\n print(\"结束图表操作\")\n pass\n \n'''\nPPT操作部分说明:\n1.动画效果\n2.切换效果\n3.超级链接\n4.背景\n5.插入,图片,声音,视频\n\n考试样例:\n1.在第四张幻灯片的上方插入横排文本框,在文本框中输入“吃月饼”,字体黑体,字号32。\n2.将第三张幻灯片的背景填充效果设置为纹理填充,纹理为“鱼类化石”。\n3.设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。\n4.给第四张幻灯片右侧的图片设置进入中的“劈裂”动画效果,效果选项为“中央向上下展开”。\n5.给第三张幻灯片中的文字“赏桂花”添加超链接,使其链接到第五张幻灯片。\n'''\n\nclass PptOperation:\n def __init__(self):\n pass\n def AnimationOper(self):\n pass\n def SwitchOper(self):\n pass\n def InsertOper(self,style):\n pass\n def BackgroundOper(self):\n pass\n def HyperlinkOper(self):\n pass\n def judgeAnimation(self,OperStr):\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n def judgeSwitch(self,OperStr):\n print('正在完成要求',OperStr)\n pattern = re.compile('“(.*)”')\n print (pattern.findall(OperStr))\n strFile=str(pattern.findall(OperStr))\n file1=strFile.split(\"”\")\n source=file1[0][2:]#获得源文件\n print(source)\n 
file2=strFile.split(\"“\")\n dest=file2[1][0:-2]#获得目标文件\n print(dest)\n def judgeInsert(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeBackground(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeHyperlink(self,OperStr):\n print('正在完成要求',OperStr)\n def judgeOperFromStr(self,OperStr):#根据小题文本选择对应的操作\n \n if OperStr.find(\"动画\") !=-1:\n print(\"进入动画操作\")\n self.judgeAnimation(OperStr)\n print(\"结束动画操作\")\n if OperStr.find(\"切换\") !=-1:\n print(\"进入切换操作\")\n self.judgeSwitch(OperStr)\n print(\"结束切换操作\")\n if OperStr.find(\"超级链接\") !=-1:\n print(\"进入超级链接操作\")\n self.judgeHyperlink(OperStr)\n print(\"结束超级链接操作\")\n if OperStr.find(\"背景\") !=-1:\n print(\"进入背景操作\")\n self.judgeBackground(OperStr)\n print(\"结束背景操作\")\n if OperStr.find(\"插入\") !=-1:\n print(\"进入插入操作\")\n self.judgeInsert(OperStr)\n print(\"结束插入操作\")\n \n'''\nInput文字录入操作部分说明:\n考试波及知识点:\ncom对象的调用演示:\nclass InputOperation:\n'''\nclass OperationTypeJudge:\n def __init__(self):\n pass\n def getType(self,OperStr):\n if OperStr.find(\"替换\") !=-1 or OperStr.find(\"首行缩进\") !=-1:\n print('这是word题要求')\n print('已转word题处理')\n elif OperStr.find(\"公式\") !=-1 or OperStr.find(\"函数\") !=-1:\n print('这是excel题要求')\n print('已转excel题处理')\n elif OperStr.find(\"切换\") !=-1 or OperStr.find(\"动画\") !=-1:\n print('这是ppt题要求')\n print('已转ppt题处理')\n pass\n def getOperaPath(self):\n pass\n def getOperaFileName(self):\n pass\n'''\n选择题部分说明:\n''' \nclass SelectOperation: \n def __init__(self):\n pass \n def getQusetionTxt(self,item):\n pass\n def getQusetionPic(self,item):\n pass\n def getAnswer(self,item):\n pass\n def getCorrectAnswer(self,item):\n pass\n \n'''\n判断题部分说明:\n''' \nclass JudgeOperation: \n def __init__(self):\n pass \n def getQusetionTxt(self,item):\n pass\n def getQusetionPic(self,item):\n pass\n def getAnswer(self,item):\n pass\n def getCorrectAnswer(self,item):\n pass \nif __name__ == \"__main__\":\n win=WinOperation()\n win.judgeOperFromStr('1、在“蕨类植物”文件夹中,新建一个子文件夹“薄囊蕨类”。')\n win.judgeOperFromStr('2、将文件“淡水藻.ddd”移动到“藻类植物”文件夹中。')\n win.judgeOperFromStr('3、设置“螺旋藻.aaa”文件属性为“只读”。')\n win.judgeOperFromStr('4、在桌面上建立“绿色植物”的快捷方式。')\n\n word=WordOperation()\n word.judgeOperFromStr('1. 将标题“师恩难忘”设置为黑体,居中对齐。')\n word.judgeOperFromStr('2.将文中第二段(这个小学设在一座庙内……)设置为首行缩进2字符。')\n word.judgeOperFromStr('3.将文中所有的“田老师”替换为“田先生”。')\n word.judgeOperFromStr('4. 设置页边距为上下各2.5厘米(应用于整篇文档)。')\n word.judgeOperFromStr('5. 在正文下面的空白处插入艺术字,内容为“师恩难忘”(样式任选)。')\n\n excel=ExcelOperation(r'c:/test.xls')\n excel.judgeOperFromStr('1.将A2所在行的行高设置为30(40像素)。')\n excel.judgeOperFromStr('2.根据工作表中提供的公式,计算各班级的“3D社团参与比例”,并将结果填写在F3:F7单元格内。')\n excel.judgeOperFromStr('3.给A2:F8单元格区域加所有框线。')\n excel.judgeOperFromStr('4.按“无人机社团人数”由高到低排序。')\n excel.judgeOperFromStr('5.选定A2:B7单元格区域,制作“三维折线图”,并插入到Sheet1工作表中。')\n\n ppt=PptOperation()\n ppt.judgeOperFromStr('1.在第四张幻灯片的上方插入横排文本框,在文本框中输入“吃月饼”,字体黑体,字号32。')\n ppt.judgeOperFromStr('2.将第三张幻灯片的背景填充效果设置为纹理填充,纹理为“鱼类化石”。')\n ppt.judgeOperFromStr('3.设置第三张幻灯片的切换效果为“推进”,声音为“鼓掌”。')\n ppt.judgeOperFromStr('4.给第四张幻灯片右侧的图片设置进入中的“劈裂”动画效果,效果选项为“中央向上下展开”。')\n ppt.judgeOperFromStr('5.给第三张幻灯片中的文字“赏桂花”添加超链接,使其链接到第五张幻灯片。')\n",
"step-ids": [
42,
52,
64,
71,
87
]
}
|
[
42,
52,
64,
71,
87
] |
import rambench
rambench.perform_benchmark()
|
normal
|
{
"blob_id": "3d1f2130043613dc8d5bbd773edd96c87c355de9",
"index": 3455,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrambench.perform_benchmark()\n",
"step-3": "import rambench\nrambench.perform_benchmark()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
import matplotlib.pyplot as plt
# some important constants
x_bound = y_bound = 1.
dx = dy = 0.05
k = 0.1
nx, ny = int(x_bound/dx), int(y_bound/dy)
dx2, dy2 = dx*dx, dy*dy
dt = (dx2 / k) / 4.0
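# dt is the 2D FTCS stability limit dx^2/(4*k) (with dx == dy), so the explicit update below stays stable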
t_end = 80 * dt
# set the grid
u0 = np.zeros((nx, ny))
u_exact = np.zeros((nx, ny))
u = np.zeros((nx, ny))
def get_exact(x, y, t, trunc):
"""Get the exact solution at a set t
"""
Z=0
for n in range(1, trunc):
for m in range(1, trunc):
Z_num = -120 * ( ((-n)**4 * np.pi**4 * (-1)**n) +\
(12 * n**2 * np.pi ** 2 * (-1)**n)\
+ 24 + (24 * (-1)**(1+n))\
*(-2 + (2*(-1)**m) ) )
Z_num_xy = np.sin(n*x*np.pi)*np.sin(m*y*np.pi)\
* np.exp(-(n**2 + m**2) * np.pi**2 * k * t)
Z_denom = n**7 * np.pi**10 * m**3
Z += Z_num * Z_num_xy / Z_denom
return Z
def get_L_norm(exact, u):
diffs = abs(exact - u)
l_diffs = []
for row in diffs:
l_diffs.append(max(row))
return max(l_diffs), diffs
# Initial conditions
for i in range(nx):
for j in range(ny):
x = i*dx
y = j*dy
u0[i,j] = x * (1-x**5) * y * (1-y)
u_exact[i,j] = get_exact(x, y, t_end, 10)
def do_timestep(u0, u):
# Propagate with forward-difference in time, central-difference in space
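    # Vectorised 5-point stencil on the interior nodes; boundary rows and columns keep their initial values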
u[1:-1, 1:-1] = u0[1:-1, 1:-1] + k * dt * (
(u0[2:, 1:-1] - 2*u0[1:-1, 1:-1] + u0[:-2, 1:-1])/dx2
+ (u0[1:-1, 2:] - 2*u0[1:-1, 1:-1] + u0[1:-1, :-2])/dy2 )
u0 = u.copy()
return u0, u
# March the scheme forward 80 explicit steps so the elapsed time matches t_end used for u_exact
for _ in range(80):
    u0, u = do_timestep(u0, u)
l_inf_norm, norm_diff_vals = get_L_norm(u_exact, u0)
fig = plt.figure(1)
ax = fig.add_subplot(111)
im = ax.imshow(u.copy(), cmap=plt.get_cmap('hot'), vmin=0, vmax=0.06)
cbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
ax.set_title('2D distribution after 80 time steps using FTCS')
plt.xlabel('x node [-]')
plt.ylabel('y node [-]')
fig.colorbar(im, cax=cbar_ax)
plt.savefig('./writeup/problem2_plot.png')
fig = plt.figure(2)
ax = fig.add_subplot(111)
ax.set_title('|f_exact - f_ftcs| Using FTCS')
plt.xlabel('x node [-]')
plt.ylabel('y node [-]')
im = ax.imshow(norm_diff_vals.copy(), cmap=plt.get_cmap('hot'), vmin=0,\
vmax=l_inf_norm)
cbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.colorbar(im, cax=cbar_ax)
plt.savefig('./writeup/problem2_error.png')
print("The L_infinity error for FTCS is: " + str(l_inf_norm))
|
normal
|
{
"blob_id": "c556aaf6aecb3c91d9574e0a158a9fa954108d70",
"index": 8193,
"step-1": "<mask token>\n\n\ndef get_exact(x, y, t, trunc):\n \"\"\"Get the exact solution at a set t\n \"\"\"\n Z = 0\n for n in range(1, trunc):\n for m in range(1, trunc):\n Z_num = -120 * ((-n) ** 4 * np.pi ** 4 * (-1) ** n + 12 * n ** \n 2 * np.pi ** 2 * (-1) ** n + 24 + 24 * (-1) ** (1 + n) * (-\n 2 + 2 * (-1) ** m))\n Z_num_xy = np.sin(n * x * np.pi) * np.sin(m * y * np.pi) * np.exp(\n -(n ** 2 + m ** 2) * np.pi ** 2 * k * t)\n Z_denom = n ** 7 * np.pi ** 10 * m ** 3\n Z += Z_num * Z_num_xy / Z_denom\n return Z\n\n\n<mask token>\n\n\ndef do_timestep(u0, u):\n u[1:-1, 1:-1] = u0[1:-1, 1:-1] + k * dt * ((u0[2:, 1:-1] - 2 * u0[1:-1,\n 1:-1] + u0[:-2, 1:-1]) / dx2 + (u0[1:-1, 2:] - 2 * u0[1:-1, 1:-1] +\n u0[1:-1, :-2]) / dy2)\n u0 = u.copy()\n return u0, u\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_exact(x, y, t, trunc):\n \"\"\"Get the exact solution at a set t\n \"\"\"\n Z = 0\n for n in range(1, trunc):\n for m in range(1, trunc):\n Z_num = -120 * ((-n) ** 4 * np.pi ** 4 * (-1) ** n + 12 * n ** \n 2 * np.pi ** 2 * (-1) ** n + 24 + 24 * (-1) ** (1 + n) * (-\n 2 + 2 * (-1) ** m))\n Z_num_xy = np.sin(n * x * np.pi) * np.sin(m * y * np.pi) * np.exp(\n -(n ** 2 + m ** 2) * np.pi ** 2 * k * t)\n Z_denom = n ** 7 * np.pi ** 10 * m ** 3\n Z += Z_num * Z_num_xy / Z_denom\n return Z\n\n\ndef get_L_norm(exact, u):\n diffs = abs(exact - u)\n l_diffs = []\n for row in diffs:\n l_diffs.append(max(row))\n return max(l_diffs), diffs\n\n\nfor i in range(nx):\n for j in range(ny):\n x = i * dx\n y = j * dy\n u0[i, j] = x * (1 - x ** 5) * y * (1 - y)\n u_exact[i, j] = get_exact(x, y, t_end, 10)\n\n\ndef do_timestep(u0, u):\n u[1:-1, 1:-1] = u0[1:-1, 1:-1] + k * dt * ((u0[2:, 1:-1] - 2 * u0[1:-1,\n 1:-1] + u0[:-2, 1:-1]) / dx2 + (u0[1:-1, 2:] - 2 * u0[1:-1, 1:-1] +\n u0[1:-1, :-2]) / dy2)\n u0 = u.copy()\n return u0, u\n\n\n<mask token>\nax.set_title('2D distribution after 80 time steps using FTCS')\nplt.xlabel('x node [-]')\nplt.ylabel('y node [-]')\nfig.colorbar(im, cax=cbar_ax)\nplt.savefig('./writeup/problem2_plot.png')\n<mask token>\nax.set_title('|f_exact - f_ftcs| Using FTCS')\nplt.xlabel('x node [-]')\nplt.ylabel('y node [-]')\n<mask token>\nfig.colorbar(im, cax=cbar_ax)\nplt.savefig('./writeup/problem2_error.png')\nprint('The L_infinity error for FTCS is: ' + str(l_inf_norm))\n",
"step-3": "<mask token>\nx_bound = y_bound = 1.0\ndx = dy = 0.05\nk = 0.1\nnx, ny = int(x_bound / dx), int(y_bound / dy)\ndx2, dy2 = dx * dx, dy * dy\ndt = dx2 / k / 4.0\nt_end = 80 * dt\nu0 = np.zeros((nx, ny))\nu_exact = np.zeros((nx, ny))\nu = np.zeros((nx, ny))\n\n\ndef get_exact(x, y, t, trunc):\n \"\"\"Get the exact solution at a set t\n \"\"\"\n Z = 0\n for n in range(1, trunc):\n for m in range(1, trunc):\n Z_num = -120 * ((-n) ** 4 * np.pi ** 4 * (-1) ** n + 12 * n ** \n 2 * np.pi ** 2 * (-1) ** n + 24 + 24 * (-1) ** (1 + n) * (-\n 2 + 2 * (-1) ** m))\n Z_num_xy = np.sin(n * x * np.pi) * np.sin(m * y * np.pi) * np.exp(\n -(n ** 2 + m ** 2) * np.pi ** 2 * k * t)\n Z_denom = n ** 7 * np.pi ** 10 * m ** 3\n Z += Z_num * Z_num_xy / Z_denom\n return Z\n\n\ndef get_L_norm(exact, u):\n diffs = abs(exact - u)\n l_diffs = []\n for row in diffs:\n l_diffs.append(max(row))\n return max(l_diffs), diffs\n\n\nfor i in range(nx):\n for j in range(ny):\n x = i * dx\n y = j * dy\n u0[i, j] = x * (1 - x ** 5) * y * (1 - y)\n u_exact[i, j] = get_exact(x, y, t_end, 10)\n\n\ndef do_timestep(u0, u):\n u[1:-1, 1:-1] = u0[1:-1, 1:-1] + k * dt * ((u0[2:, 1:-1] - 2 * u0[1:-1,\n 1:-1] + u0[:-2, 1:-1]) / dx2 + (u0[1:-1, 2:] - 2 * u0[1:-1, 1:-1] +\n u0[1:-1, :-2]) / dy2)\n u0 = u.copy()\n return u0, u\n\n\nu0, u = do_timestep(u0, u)\nl_inf_norm, norm_diff_vals = get_L_norm(u_exact, u0)\nfig = plt.figure(1)\nax = fig.add_subplot(111)\nim = ax.imshow(u.copy(), cmap=plt.get_cmap('hot'), vmin=0, vmax=0.06)\ncbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])\nax.set_title('2D distribution after 80 time steps using FTCS')\nplt.xlabel('x node [-]')\nplt.ylabel('y node [-]')\nfig.colorbar(im, cax=cbar_ax)\nplt.savefig('./writeup/problem2_plot.png')\nfig = plt.figure(2)\nax = fig.add_subplot(111)\nax.set_title('|f_exact - f_ftcs| Using FTCS')\nplt.xlabel('x node [-]')\nplt.ylabel('y node [-]')\nim = ax.imshow(norm_diff_vals.copy(), cmap=plt.get_cmap('hot'), vmin=0,\n vmax=l_inf_norm)\ncbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])\nfig.colorbar(im, cax=cbar_ax)\nplt.savefig('./writeup/problem2_error.png')\nprint('The L_infinity error for FTCS is: ' + str(l_inf_norm))\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nx_bound = y_bound = 1.0\ndx = dy = 0.05\nk = 0.1\nnx, ny = int(x_bound / dx), int(y_bound / dy)\ndx2, dy2 = dx * dx, dy * dy\ndt = dx2 / k / 4.0\nt_end = 80 * dt\nu0 = np.zeros((nx, ny))\nu_exact = np.zeros((nx, ny))\nu = np.zeros((nx, ny))\n\n\ndef get_exact(x, y, t, trunc):\n \"\"\"Get the exact solution at a set t\n \"\"\"\n Z = 0\n for n in range(1, trunc):\n for m in range(1, trunc):\n Z_num = -120 * ((-n) ** 4 * np.pi ** 4 * (-1) ** n + 12 * n ** \n 2 * np.pi ** 2 * (-1) ** n + 24 + 24 * (-1) ** (1 + n) * (-\n 2 + 2 * (-1) ** m))\n Z_num_xy = np.sin(n * x * np.pi) * np.sin(m * y * np.pi) * np.exp(\n -(n ** 2 + m ** 2) * np.pi ** 2 * k * t)\n Z_denom = n ** 7 * np.pi ** 10 * m ** 3\n Z += Z_num * Z_num_xy / Z_denom\n return Z\n\n\ndef get_L_norm(exact, u):\n diffs = abs(exact - u)\n l_diffs = []\n for row in diffs:\n l_diffs.append(max(row))\n return max(l_diffs), diffs\n\n\nfor i in range(nx):\n for j in range(ny):\n x = i * dx\n y = j * dy\n u0[i, j] = x * (1 - x ** 5) * y * (1 - y)\n u_exact[i, j] = get_exact(x, y, t_end, 10)\n\n\ndef do_timestep(u0, u):\n u[1:-1, 1:-1] = u0[1:-1, 1:-1] + k * dt * ((u0[2:, 1:-1] - 2 * u0[1:-1,\n 1:-1] + u0[:-2, 1:-1]) / dx2 + (u0[1:-1, 2:] - 2 * u0[1:-1, 1:-1] +\n u0[1:-1, :-2]) / dy2)\n u0 = u.copy()\n return u0, u\n\n\nu0, u = do_timestep(u0, u)\nl_inf_norm, norm_diff_vals = get_L_norm(u_exact, u0)\nfig = plt.figure(1)\nax = fig.add_subplot(111)\nim = ax.imshow(u.copy(), cmap=plt.get_cmap('hot'), vmin=0, vmax=0.06)\ncbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])\nax.set_title('2D distribution after 80 time steps using FTCS')\nplt.xlabel('x node [-]')\nplt.ylabel('y node [-]')\nfig.colorbar(im, cax=cbar_ax)\nplt.savefig('./writeup/problem2_plot.png')\nfig = plt.figure(2)\nax = fig.add_subplot(111)\nax.set_title('|f_exact - f_ftcs| Using FTCS')\nplt.xlabel('x node [-]')\nplt.ylabel('y node [-]')\nim = ax.imshow(norm_diff_vals.copy(), cmap=plt.get_cmap('hot'), vmin=0,\n vmax=l_inf_norm)\ncbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])\nfig.colorbar(im, cax=cbar_ax)\nplt.savefig('./writeup/problem2_error.png')\nprint('The L_infinity error for FTCS is: ' + str(l_inf_norm))\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\n\n# some important constants \nx_bound = y_bound = 1.\ndx = dy = 0.05\nk = 0.1\nnx, ny = int(x_bound/dx), int(y_bound/dy)\ndx2, dy2 = dx*dx, dy*dy\ndt = (dx2 / k) / 4.0\nt_end = 80 * dt\n\n# set the grid\nu0 = np.zeros((nx, ny))\nu_exact = np.zeros((nx, ny))\nu = np.zeros((nx, ny))\n\ndef get_exact(x, y, t, trunc):\n \"\"\"Get the exact solution at a set t\n \"\"\"\n Z=0\n for n in range(1, trunc):\n for m in range(1, trunc):\n Z_num = -120 * ( ((-n)**4 * np.pi**4 * (-1)**n) +\\\n (12 * n**2 * np.pi ** 2 * (-1)**n)\\\n + 24 + (24 * (-1)**(1+n))\\\n *(-2 + (2*(-1)**m) ) )\n Z_num_xy = np.sin(n*x*np.pi)*np.sin(m*y*np.pi)\\\n * np.exp(-(n**2 + m**2) * np.pi**2 * k * t)\n Z_denom = n**7 * np.pi**10 * m**3\n\n Z += Z_num * Z_num_xy / Z_denom\n \n return Z\n\ndef get_L_norm(exact, u):\n \n diffs = abs(exact - u)\n l_diffs = []\n for row in diffs:\n l_diffs.append(max(row))\n return max(l_diffs), diffs\n\n# Initial conditions\n\nfor i in range(nx):\n for j in range(ny):\n x = i*dx\n y = j*dy\n u0[i,j] = x * (1-x**5) * y * (1-y)\n u_exact[i,j] = get_exact(x, y, t_end, 10)\n\n\ndef do_timestep(u0, u):\n # Propagate with forward-difference in time, central-difference in space\n u[1:-1, 1:-1] = u0[1:-1, 1:-1] + k * dt * (\n (u0[2:, 1:-1] - 2*u0[1:-1, 1:-1] + u0[:-2, 1:-1])/dx2\n + (u0[1:-1, 2:] - 2*u0[1:-1, 1:-1] + u0[1:-1, :-2])/dy2 )\n\n u0 = u.copy()\n return u0, u\n\nu0, u = do_timestep(u0, u)\nl_inf_norm, norm_diff_vals = get_L_norm(u_exact, u0)\n\nfig = plt.figure(1)\nax = fig.add_subplot(111)\nim = ax.imshow(u.copy(), cmap=plt.get_cmap('hot'), vmin=0, vmax=0.06)\ncbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])\nax.set_title('2D distribution after 80 time steps using FTCS')\nplt.xlabel('x node [-]')\nplt.ylabel('y node [-]')\nfig.colorbar(im, cax=cbar_ax)\nplt.savefig('./writeup/problem2_plot.png')\n\nfig = plt.figure(2)\nax = fig.add_subplot(111)\nax.set_title('|f_exact - f_ftcs| Using FTCS')\nplt.xlabel('x node [-]')\nplt.ylabel('y node [-]')\nim = ax.imshow(norm_diff_vals.copy(), cmap=plt.get_cmap('hot'), vmin=0,\\\n vmax=l_inf_norm)\ncbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])\nfig.colorbar(im, cax=cbar_ax)\nplt.savefig('./writeup/problem2_error.png')\n\nprint(\"The L_infinity error for FTCS is: \" + str(l_inf_norm))\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
#!/usr/bin/env python
import errno
import logging
import os
import re
import sys
import argparse
def parse_map(map_str):
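    # Each non-empty line of the map file has the form "<regex> -- <replacement>";
    # a replacement of "!ignore" marks files that should be skipped.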
file_map = []
for line in map_str.split('\n'):
if not line:
continue
find, replace = line.split(' -- ', 1)
file_map.append((find, replace))
return file_map
def map_file(file_map, d, f):
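    # Rules containing '/' are matched against the joined path "dir/file";
    # all other rules match the bare filename, and the result keeps the original directory.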
for find, repl in file_map:
if '/' in find:
source = os.path.join(d, f)
includes_path = True
else:
source = f
includes_path = False
match = re.match(find, source)
if match:
if repl == '!ignore':
return None
ret = re.sub(find, repl, source)
if includes_path:
return ret
else:
return os.path.join(d, ret)
    else:
        # no rule matched this file
        raise ValueError('File {} does not match any rules.'.format(f))
def install_file(source, dest):
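    # Hard-links source to dest, creating parent directories as needed;
    # an existing file is replaced only when --force is given (and left alone if it is already the same file).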
dest = os.path.expanduser(dest)
logging.debug('Processing {}'.format(source))
try:
dirname = os.path.dirname(dest)
if dirname:
os.makedirs(dirname)
except OSError as e:
# Error 'File Exists' is ok, all others are a problem.
if e.errno != errno.EEXIST:
raise
if os.path.exists(dest):
if CONFIG.force:
os.unlink(dest)
elif os.path.samefile(source, dest):
return True
else:
logging.warning('Not replacing existing file {} with {}.'.format(dest, source))
return False
logging.info('Linking {} to {}'.format(source, dest))
if not CONFIG.noop:
os.link(source, dest)
class ChangeDir(object):
def __init__(self, path):
self.path = path
        self.olddir = os.getcwd()
def __enter__(self):
        self.olddir = os.getcwd()
os.chdir(self.path)
def __exit__(self, *args):
os.chdir(self.olddir)
def clamp(n, bottom, top):
return min(max(bottom, n), top)
CONFIG = None
def loadConfig():
global CONFIG
parser = argparse.ArgumentParser(description='Install dotfiles.')
parser.add_argument('-n', '--noop', action='store_true')
parser.add_argument('-v', '--verbose', action='append_const', const=1)
parser.add_argument('-q', '--quiet', action='append_const', dest='verbose', const=-1)
parser.add_argument('-f', '--force', action='store_true')
opt = parser.parse_args()
opt.verbose = clamp(2 + sum(opt.verbose or [0]), 0, 4)
CONFIG = opt
def main():
loadConfig()
log_levels = [logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
logging.basicConfig(level=log_levels[CONFIG.verbose])
if CONFIG.noop:
logging.info('Running in no-op mode')
try:
with open('map') as f:
map_str = f.read()
except IOError:
logging.error('Could not open map file.')
sys.exit(1)
file_map = parse_map(map_str)
with ChangeDir('configs'):
for root, dirs, files in os.walk('.'):
# Remove leading ./ or .
root = re.sub(r'^./?', '', root)
for f in files:
try:
dest = map_file(file_map, root, f)
if dest is not None:
install_file(os.path.join(root, f), dest)
except ValueError:
logging.error('File "{}" does not match any rules.'.format(f))
if __name__ == '__main__':
os.chdir(os.path.dirname(__file__))
main()
|
normal
|
{
"blob_id": "03d07f5f4647e904c288e828b8f8e7de35740054",
"index": 3737,
"step-1": "<mask token>\n\n\ndef map_file(file_map, d, f):\n for find, repl in file_map:\n if '/' in find:\n source = os.path.join(d, f)\n includes_path = True\n else:\n source = f\n includes_path = False\n match = re.match(find, source)\n if match:\n if repl == '!ignore':\n return None\n ret = re.sub(find, repl, source)\n if includes_path:\n return ret\n else:\n return os.path.join(d, ret)\n else:\n raise ValueError('File {} does not match any rules.'.format(f))\n\n\ndef install_file(source, dest):\n dest = os.path.expanduser(dest)\n logging.debug('Processing {}'.format(source))\n try:\n dirname = os.path.dirname(dest)\n if dirname:\n os.makedirs(dirname)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n if os.path.exists(dest):\n if CONFIG.force:\n os.unlink(dest)\n elif os.path.samefile(source, dest):\n return True\n else:\n logging.warning('Not replacing existing file {} with {}.'.\n format(dest, source))\n return False\n logging.info('Linking {} to {}'.format(source, dest))\n if not CONFIG.noop:\n os.link(source, dest)\n\n\nclass ChangeDir(object):\n\n def __init__(self, path):\n self.path = path\n self.olddir = os.path.curdir\n\n def __enter__(self):\n self.olddir = os.path.curdir\n os.chdir(self.path)\n\n def __exit__(self, *args):\n os.chdir(self.olddir)\n\n\n<mask token>\n\n\ndef loadConfig():\n global CONFIG\n parser = argparse.ArgumentParser(description='Install dotfiles.')\n parser.add_argument('-n', '--noop', action='store_true')\n parser.add_argument('-v', '--verbose', action='append_const', const=1)\n parser.add_argument('-q', '--quiet', action='append_const', dest=\n 'verbose', const=-1)\n parser.add_argument('-f', '--force', action='store_true')\n opt = parser.parse_args()\n opt.verbose = clamp(2 + sum(opt.verbose or [0]), 0, 4)\n CONFIG = opt\n\n\ndef main():\n loadConfig()\n log_levels = [logging.CRITICAL, logging.ERROR, logging.WARNING, logging\n .INFO, logging.DEBUG]\n logging.basicConfig(level=log_levels[CONFIG.verbose])\n if CONFIG.noop:\n logging.info('Running in no-op mode')\n try:\n with open('map') as f:\n map_str = f.read()\n except IOError:\n logging.error('Could not open map file.')\n sys.exit(1)\n file_map = parse_map(map_str)\n with ChangeDir('configs'):\n for root, dirs, files in os.walk('.'):\n root = re.sub('^./?', '', root)\n for f in files:\n try:\n dest = map_file(file_map, root, f)\n if dest is not None:\n install_file(os.path.join(root, f), dest)\n except ValueError:\n logging.error('File \"{}\" does not match any rules.'.\n format(f))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_map(map_str):\n file_map = []\n for line in map_str.split('\\n'):\n if not line:\n continue\n find, replace = line.split(' -- ', 1)\n file_map.append((find, replace))\n return file_map\n\n\ndef map_file(file_map, d, f):\n for find, repl in file_map:\n if '/' in find:\n source = os.path.join(d, f)\n includes_path = True\n else:\n source = f\n includes_path = False\n match = re.match(find, source)\n if match:\n if repl == '!ignore':\n return None\n ret = re.sub(find, repl, source)\n if includes_path:\n return ret\n else:\n return os.path.join(d, ret)\n else:\n raise ValueError('File {} does not match any rules.'.format(f))\n\n\ndef install_file(source, dest):\n dest = os.path.expanduser(dest)\n logging.debug('Processing {}'.format(source))\n try:\n dirname = os.path.dirname(dest)\n if dirname:\n os.makedirs(dirname)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n if os.path.exists(dest):\n if CONFIG.force:\n os.unlink(dest)\n elif os.path.samefile(source, dest):\n return True\n else:\n logging.warning('Not replacing existing file {} with {}.'.\n format(dest, source))\n return False\n logging.info('Linking {} to {}'.format(source, dest))\n if not CONFIG.noop:\n os.link(source, dest)\n\n\nclass ChangeDir(object):\n\n def __init__(self, path):\n self.path = path\n self.olddir = os.path.curdir\n\n def __enter__(self):\n self.olddir = os.path.curdir\n os.chdir(self.path)\n\n def __exit__(self, *args):\n os.chdir(self.olddir)\n\n\ndef clamp(n, bottom, top):\n return min(max(bottom, n), top)\n\n\n<mask token>\n\n\ndef loadConfig():\n global CONFIG\n parser = argparse.ArgumentParser(description='Install dotfiles.')\n parser.add_argument('-n', '--noop', action='store_true')\n parser.add_argument('-v', '--verbose', action='append_const', const=1)\n parser.add_argument('-q', '--quiet', action='append_const', dest=\n 'verbose', const=-1)\n parser.add_argument('-f', '--force', action='store_true')\n opt = parser.parse_args()\n opt.verbose = clamp(2 + sum(opt.verbose or [0]), 0, 4)\n CONFIG = opt\n\n\ndef main():\n loadConfig()\n log_levels = [logging.CRITICAL, logging.ERROR, logging.WARNING, logging\n .INFO, logging.DEBUG]\n logging.basicConfig(level=log_levels[CONFIG.verbose])\n if CONFIG.noop:\n logging.info('Running in no-op mode')\n try:\n with open('map') as f:\n map_str = f.read()\n except IOError:\n logging.error('Could not open map file.')\n sys.exit(1)\n file_map = parse_map(map_str)\n with ChangeDir('configs'):\n for root, dirs, files in os.walk('.'):\n root = re.sub('^./?', '', root)\n for f in files:\n try:\n dest = map_file(file_map, root, f)\n if dest is not None:\n install_file(os.path.join(root, f), dest)\n except ValueError:\n logging.error('File \"{}\" does not match any rules.'.\n format(f))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_map(map_str):\n file_map = []\n for line in map_str.split('\\n'):\n if not line:\n continue\n find, replace = line.split(' -- ', 1)\n file_map.append((find, replace))\n return file_map\n\n\ndef map_file(file_map, d, f):\n for find, repl in file_map:\n if '/' in find:\n source = os.path.join(d, f)\n includes_path = True\n else:\n source = f\n includes_path = False\n match = re.match(find, source)\n if match:\n if repl == '!ignore':\n return None\n ret = re.sub(find, repl, source)\n if includes_path:\n return ret\n else:\n return os.path.join(d, ret)\n else:\n raise ValueError('File {} does not match any rules.'.format(f))\n\n\ndef install_file(source, dest):\n dest = os.path.expanduser(dest)\n logging.debug('Processing {}'.format(source))\n try:\n dirname = os.path.dirname(dest)\n if dirname:\n os.makedirs(dirname)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n if os.path.exists(dest):\n if CONFIG.force:\n os.unlink(dest)\n elif os.path.samefile(source, dest):\n return True\n else:\n logging.warning('Not replacing existing file {} with {}.'.\n format(dest, source))\n return False\n logging.info('Linking {} to {}'.format(source, dest))\n if not CONFIG.noop:\n os.link(source, dest)\n\n\nclass ChangeDir(object):\n\n def __init__(self, path):\n self.path = path\n self.olddir = os.path.curdir\n\n def __enter__(self):\n self.olddir = os.path.curdir\n os.chdir(self.path)\n\n def __exit__(self, *args):\n os.chdir(self.olddir)\n\n\ndef clamp(n, bottom, top):\n return min(max(bottom, n), top)\n\n\n<mask token>\n\n\ndef loadConfig():\n global CONFIG\n parser = argparse.ArgumentParser(description='Install dotfiles.')\n parser.add_argument('-n', '--noop', action='store_true')\n parser.add_argument('-v', '--verbose', action='append_const', const=1)\n parser.add_argument('-q', '--quiet', action='append_const', dest=\n 'verbose', const=-1)\n parser.add_argument('-f', '--force', action='store_true')\n opt = parser.parse_args()\n opt.verbose = clamp(2 + sum(opt.verbose or [0]), 0, 4)\n CONFIG = opt\n\n\ndef main():\n loadConfig()\n log_levels = [logging.CRITICAL, logging.ERROR, logging.WARNING, logging\n .INFO, logging.DEBUG]\n logging.basicConfig(level=log_levels[CONFIG.verbose])\n if CONFIG.noop:\n logging.info('Running in no-op mode')\n try:\n with open('map') as f:\n map_str = f.read()\n except IOError:\n logging.error('Could not open map file.')\n sys.exit(1)\n file_map = parse_map(map_str)\n with ChangeDir('configs'):\n for root, dirs, files in os.walk('.'):\n root = re.sub('^./?', '', root)\n for f in files:\n try:\n dest = map_file(file_map, root, f)\n if dest is not None:\n install_file(os.path.join(root, f), dest)\n except ValueError:\n logging.error('File \"{}\" does not match any rules.'.\n format(f))\n\n\nif __name__ == '__main__':\n os.chdir(os.path.dirname(__file__))\n main()\n",
"step-4": "<mask token>\n\n\ndef parse_map(map_str):\n file_map = []\n for line in map_str.split('\\n'):\n if not line:\n continue\n find, replace = line.split(' -- ', 1)\n file_map.append((find, replace))\n return file_map\n\n\ndef map_file(file_map, d, f):\n for find, repl in file_map:\n if '/' in find:\n source = os.path.join(d, f)\n includes_path = True\n else:\n source = f\n includes_path = False\n match = re.match(find, source)\n if match:\n if repl == '!ignore':\n return None\n ret = re.sub(find, repl, source)\n if includes_path:\n return ret\n else:\n return os.path.join(d, ret)\n else:\n raise ValueError('File {} does not match any rules.'.format(f))\n\n\ndef install_file(source, dest):\n dest = os.path.expanduser(dest)\n logging.debug('Processing {}'.format(source))\n try:\n dirname = os.path.dirname(dest)\n if dirname:\n os.makedirs(dirname)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n if os.path.exists(dest):\n if CONFIG.force:\n os.unlink(dest)\n elif os.path.samefile(source, dest):\n return True\n else:\n logging.warning('Not replacing existing file {} with {}.'.\n format(dest, source))\n return False\n logging.info('Linking {} to {}'.format(source, dest))\n if not CONFIG.noop:\n os.link(source, dest)\n\n\nclass ChangeDir(object):\n\n def __init__(self, path):\n self.path = path\n self.olddir = os.path.curdir\n\n def __enter__(self):\n self.olddir = os.path.curdir\n os.chdir(self.path)\n\n def __exit__(self, *args):\n os.chdir(self.olddir)\n\n\ndef clamp(n, bottom, top):\n return min(max(bottom, n), top)\n\n\nCONFIG = None\n\n\ndef loadConfig():\n global CONFIG\n parser = argparse.ArgumentParser(description='Install dotfiles.')\n parser.add_argument('-n', '--noop', action='store_true')\n parser.add_argument('-v', '--verbose', action='append_const', const=1)\n parser.add_argument('-q', '--quiet', action='append_const', dest=\n 'verbose', const=-1)\n parser.add_argument('-f', '--force', action='store_true')\n opt = parser.parse_args()\n opt.verbose = clamp(2 + sum(opt.verbose or [0]), 0, 4)\n CONFIG = opt\n\n\ndef main():\n loadConfig()\n log_levels = [logging.CRITICAL, logging.ERROR, logging.WARNING, logging\n .INFO, logging.DEBUG]\n logging.basicConfig(level=log_levels[CONFIG.verbose])\n if CONFIG.noop:\n logging.info('Running in no-op mode')\n try:\n with open('map') as f:\n map_str = f.read()\n except IOError:\n logging.error('Could not open map file.')\n sys.exit(1)\n file_map = parse_map(map_str)\n with ChangeDir('configs'):\n for root, dirs, files in os.walk('.'):\n root = re.sub('^./?', '', root)\n for f in files:\n try:\n dest = map_file(file_map, root, f)\n if dest is not None:\n install_file(os.path.join(root, f), dest)\n except ValueError:\n logging.error('File \"{}\" does not match any rules.'.\n format(f))\n\n\nif __name__ == '__main__':\n os.chdir(os.path.dirname(__file__))\n main()\n",
"step-5": "#!/usr/bin/env python\nimport errno\nimport logging\nimport os\nimport re\nimport sys\nimport argparse\n\n\ndef parse_map(map_str):\n file_map = []\n for line in map_str.split('\\n'):\n if not line:\n continue\n\n find, replace = line.split(' -- ', 1)\n file_map.append((find, replace))\n return file_map\n\n\ndef map_file(file_map, d, f):\n for find, repl in file_map:\n if '/' in find:\n source = os.path.join(d, f)\n includes_path = True\n else:\n source = f\n includes_path = False\n\n match = re.match(find, source)\n\n if match:\n if repl == '!ignore':\n return None\n ret = re.sub(find, repl, source)\n\n if includes_path:\n return ret\n else:\n return os.path.join(d, ret)\n else:\n raise ValueError('File {} does not match any rules.'.format(f))\n\n\ndef install_file(source, dest):\n dest = os.path.expanduser(dest)\n logging.debug('Processing {}'.format(source))\n\n try:\n dirname = os.path.dirname(dest)\n if dirname:\n os.makedirs(dirname)\n except OSError as e:\n # Error 'File Exists' is ok, all others are a problem.\n if e.errno != errno.EEXIST:\n raise\n\n if os.path.exists(dest):\n if CONFIG.force:\n os.unlink(dest)\n elif os.path.samefile(source, dest):\n return True\n else:\n logging.warning('Not replacing existing file {} with {}.'.format(dest, source))\n return False\n\n logging.info('Linking {} to {}'.format(source, dest))\n if not CONFIG.noop:\n os.link(source, dest)\n\n\nclass ChangeDir(object):\n def __init__(self, path):\n self.path = path\n self.olddir = os.path.curdir\n\n def __enter__(self):\n self.olddir = os.path.curdir\n os.chdir(self.path)\n\n def __exit__(self, *args):\n os.chdir(self.olddir)\n\n\ndef clamp(n, bottom, top):\n return min(max(bottom, n), top)\n\n\nCONFIG = None\n\ndef loadConfig():\n global CONFIG\n parser = argparse.ArgumentParser(description='Install dotfiles.')\n parser.add_argument('-n', '--noop', action='store_true')\n parser.add_argument('-v', '--verbose', action='append_const', const=1)\n parser.add_argument('-q', '--quiet', action='append_const', dest='verbose', const=-1)\n parser.add_argument('-f', '--force', action='store_true')\n\n opt = parser.parse_args()\n\n opt.verbose = clamp(2 + sum(opt.verbose or [0]), 0, 4)\n\n CONFIG = opt\n\n\ndef main():\n loadConfig()\n\n log_levels = [logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]\n logging.basicConfig(level=log_levels[CONFIG.verbose])\n\n if CONFIG.noop:\n logging.info('Running in no-op mode')\n\n\n try:\n with open('map') as f:\n map_str = f.read()\n except IOError:\n logging.error('Could not open map file.')\n sys.exit(1)\n\n file_map = parse_map(map_str)\n\n with ChangeDir('configs'):\n for root, dirs, files in os.walk('.'):\n # Remove leading ./ or .\n root = re.sub(r'^./?', '', root)\n for f in files:\n try:\n dest = map_file(file_map, root, f)\n if dest is not None:\n install_file(os.path.join(root, f), dest)\n except ValueError:\n logging.error('File \"{}\" does not match any rules.'.format(f))\n\nif __name__ == '__main__':\n os.chdir(os.path.dirname(__file__))\n main()\n",
"step-ids": [
8,
10,
11,
12,
14
]
}
|
[
8,
10,
11,
12,
14
] |
import requests
import json
def get():
market = 'Premium'
url = 'https://coinpremiums.herokuapp.com/json'
try:
result = ""
premiums = requests.get(url).json()
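        # Expected payload shape (assumed): {"premium": {exchange: {currency: {"raw": price_ratio, ...}}}}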
for exchange, exchange_currencies in premiums['premium'].items():
result += '[[{} | '.format(exchange.title())
_sum = 0
_cnt = 0
for currency_name, currency in exchange_currencies.items():
premium = currency['raw'] - 1
result += '[{}] {:.2%} '.format(currency_name.upper(), premium)
_cnt += 1
_sum += premium
result += '[평균] {:.2%} ]] '.format(_sum / _cnt)
except Exception as e:
result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.__repr__())
return result
|
normal
|
{
"blob_id": "b5581be044013df9ff812f285f99ca67c4f96a62",
"index": 2927,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get():\n market = 'Premium'\n url = 'https://coinpremiums.herokuapp.com/json'\n try:\n result = ''\n premiums = requests.get(url).json()\n for exchange, exchange_currencies in premiums['premium'].items():\n result += '[[{} | '.format(exchange.title())\n _sum = 0\n _cnt = 0\n for currency_name, currency in exchange_currencies.items():\n premium = currency['raw'] - 1\n result += '[{}] {:.2%} '.format(currency_name.upper(), premium)\n _cnt += 1\n _sum += premium\n result += '[평균] {:.2%} ]] '.format(_sum / _cnt)\n except Exception as e:\n result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.\n __repr__())\n return result\n",
"step-3": "import requests\nimport json\n\n\ndef get():\n market = 'Premium'\n url = 'https://coinpremiums.herokuapp.com/json'\n try:\n result = ''\n premiums = requests.get(url).json()\n for exchange, exchange_currencies in premiums['premium'].items():\n result += '[[{} | '.format(exchange.title())\n _sum = 0\n _cnt = 0\n for currency_name, currency in exchange_currencies.items():\n premium = currency['raw'] - 1\n result += '[{}] {:.2%} '.format(currency_name.upper(), premium)\n _cnt += 1\n _sum += premium\n result += '[평균] {:.2%} ]] '.format(_sum / _cnt)\n except Exception as e:\n result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.\n __repr__())\n return result\n",
"step-4": "import requests\nimport json\n\n\ndef get():\n market = 'Premium'\n url = 'https://coinpremiums.herokuapp.com/json'\n\n try:\n result = \"\"\n premiums = requests.get(url).json()\n\n for exchange, exchange_currencies in premiums['premium'].items():\n result += '[[{} | '.format(exchange.title())\n _sum = 0\n _cnt = 0\n for currency_name, currency in exchange_currencies.items():\n premium = currency['raw'] - 1\n result += '[{}] {:.2%} '.format(currency_name.upper(), premium)\n _cnt += 1\n _sum += premium\n result += '[평균] {:.2%} ]] '.format(_sum / _cnt)\n except Exception as e:\n result = '[{market}] 에러! : {msg}'.format(market=market, msg=e.__repr__())\n\n return result\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
# print(robjects.__file__)
import sys
sys.path.append('./')
import importlib
import json
import os
from web_app.function.WordCould import word_img
# importlib.reload(sys)
# #sys.setdefaultencoding('gbk')
class Ubiquitination():
def __init__(self,disease,path):
self.disease=disease
path=path
self.num=11
# print('泛素化',self.disease,path)
self.datas_path=self.data_path(self.disease)
self.save=self.save_path(path,self.disease)
# self.analysis(self.datas_path,self.disease,self.save)
def load_R(self):
pass
def data_path(self,name):
exp_path='./web_app/data/disease/exp_data/{}.txt'.format(name)
clinical_path='./web_app/data/disease/clinical/{}.txt'.format(name)
ubiquitina_path='./web_app/data/data/ubiq/UbiqGene.txt'
# print(exp_path)
return (exp_path,clinical_path,ubiquitina_path)
def save_path(self,path,disease):
path=path
disease=disease
sp=path+'/Ubiquitination/'
if not os.path.exists(sp):
os.makedirs(sp)
sp=sp+disease+'/'
if not os.path.exists(sp):
os.makedirs(sp)
# print(sp)
return sp
def analysis(self,data,name,save_path):
data_path=data
name=name
save_path=save_path
# print(data_path[0],'TCGA-BRCA',save_path)
lime_all='./web_app/data/Difference/{}/limma_DEG_all.csv'.format(name)
lime_n='./web_app/data/Difference/{}/limma_DEG_0.05.csv'.format(name)
ubiq='./web_app/data/data/ubiq/UbiqGene.txt'
pheno='./web_app/data/Difference/{}/pheno.csv'.format(name)
exp_data='./web_app/data/disease/exp_data/{}.csv'.format(name)
cli='./web_app/data/disease/clinical/{}.csv'.format(name)
return (lime_all,lime_n,ubiq,pheno,exp_data,cli,save_path)
fig1_result=self.fig1(lime_all,lime_n,ubiq,pheno,exp_data,cli,usa,save_path)
# print(multiple[0])
# print(single[0],single[1])
def fig1(self,lime_all,lime_n,ubiq,pheno,exp_data,cli,save_path):
lime_all=lime_all
lime_n=lime_n
ubiq=ubiq
pheno=pheno
exp_data=exp_data
cli=cli
save_path=save_path+'Fig1/'
if not os.path.exists(save_path):
os.makedirs(save_path)
r=robjects.r
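        # r.source() runs each R script in the embedded interpreter, so the functions
        # they define become callable here as attributes of r (e.g. r.Difference)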
        # Load the R scripts for the differential analysis
r.source('./web_app/script/Conversion_Difference.r')
r.source('./web_app/script/Single.r')
r.source('./web_app/script/Survival_.r')
r.source('./web_app/script/RelatedBubbles.r')
        # Run the differential analysis:
        # build the differential gene set and draw the volcano plot and heatmap
difference=r.Difference(lime_all,lime_n,ubiq,pheno,exp_data,save_path)
# print(difference[0],difference[1])
        # Univariate and multivariate analysis
single=r.SingleFactor(cli,exp_data,difference[0],save_path)
# # print([i for i in single])
survival=r.Survival_(single[0],single[1],difference[0],pheno,cli,save_path)
# survival=r.Survival_(single[0],single[1],difference[0],pheno,save_path)
# # # print([i for i in survival])
        # Correlation bubble plot
bubble=r.RelatedBubbles(survival[0],cli,save_path)
word_img(single[1],save_path)
# print([i for i in bubble])
result={
'code':1,
'difference':[i for i in difference],
'single':[i for i in single],
'survival':[i for i in survival],
'bubble':[i for i in bubble],
}
return result
def fig2(self,save_path):
# save_path=save_path+'Fig2/'
# if not os.path.exists(save_path):
# os.makedirs(save_path)
r=robjects.r
        # Load the R analysis script
r.source('./web_app/script/GeneSurvivalModel/Heatmap.r')
result={
'code':2,
}
return result
        # Banded heatmap
# r.source('./web_app/script/Heatmap.r')
# r.Heatmap('./web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/train.csv',
# "./web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/Cox_genes_OS_pValue.csv",
# './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/')
        # Lasso line plot
# r.source('./web_app/script/LineLasso.r')
# r.LineLasso(
# './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/train.csv',
# './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/CoxSingle_train.csv',
# './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/'
# )
        # Divergence curves
# r.source('./web_app/script/CurveLasso.r')
# r.CurveLasso('./web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/train.csv',
# './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/signature_gene.txt',
# './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/')
        # Random survival forest
# r.source('./web_app/script/RandomSurvivalForest.r')
# r.Rsf('./web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/train.csv',
# './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/signature_gene.txt',
# './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/')
def epath(self):
return self.save
def progress(self):
return 1
if __name__ == "__main__":
a=Ubiquitination('TCGA-LIHC','./web_app/temp/Arsz')
x=a.analysis(a.datas_path,a.disease,a.save)
f1=a.fig1(x[0],x[1],x[2],x[3],x[4],x[5],x[6])
|
normal
|
{
"blob_id": "a6ae4324580a8471969e0229c02ea1670728f25b",
"index": 3767,
"step-1": "<mask token>\n\n\nclass Ubiquitination:\n <mask token>\n\n def load_R(self):\n pass\n\n def data_path(self, name):\n exp_path = './web_app/data/disease/exp_data/{}.txt'.format(name)\n clinical_path = './web_app/data/disease/clinical/{}.txt'.format(name)\n ubiquitina_path = './web_app/data/data/ubiq/UbiqGene.txt'\n return exp_path, clinical_path, ubiquitina_path\n\n def save_path(self, path, disease):\n path = path\n disease = disease\n sp = path + '/Ubiquitination/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n sp = sp + disease + '/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n return sp\n\n def analysis(self, data, name, save_path):\n data_path = data\n name = name\n save_path = save_path\n lime_all = './web_app/data/Difference/{}/limma_DEG_all.csv'.format(name\n )\n lime_n = './web_app/data/Difference/{}/limma_DEG_0.05.csv'.format(name)\n ubiq = './web_app/data/data/ubiq/UbiqGene.txt'\n pheno = './web_app/data/Difference/{}/pheno.csv'.format(name)\n exp_data = './web_app/data/disease/exp_data/{}.csv'.format(name)\n cli = './web_app/data/disease/clinical/{}.csv'.format(name)\n return lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path\n fig1_result = self.fig1(lime_all, lime_n, ubiq, pheno, exp_data,\n cli, usa, save_path)\n <mask token>\n <mask token>\n\n def epath(self):\n return self.save\n\n def progress(self):\n return 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ubiquitination:\n <mask token>\n\n def load_R(self):\n pass\n\n def data_path(self, name):\n exp_path = './web_app/data/disease/exp_data/{}.txt'.format(name)\n clinical_path = './web_app/data/disease/clinical/{}.txt'.format(name)\n ubiquitina_path = './web_app/data/data/ubiq/UbiqGene.txt'\n return exp_path, clinical_path, ubiquitina_path\n\n def save_path(self, path, disease):\n path = path\n disease = disease\n sp = path + '/Ubiquitination/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n sp = sp + disease + '/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n return sp\n\n def analysis(self, data, name, save_path):\n data_path = data\n name = name\n save_path = save_path\n lime_all = './web_app/data/Difference/{}/limma_DEG_all.csv'.format(name\n )\n lime_n = './web_app/data/Difference/{}/limma_DEG_0.05.csv'.format(name)\n ubiq = './web_app/data/data/ubiq/UbiqGene.txt'\n pheno = './web_app/data/Difference/{}/pheno.csv'.format(name)\n exp_data = './web_app/data/disease/exp_data/{}.csv'.format(name)\n cli = './web_app/data/disease/clinical/{}.csv'.format(name)\n return lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path\n fig1_result = self.fig1(lime_all, lime_n, ubiq, pheno, exp_data,\n cli, usa, save_path)\n\n def fig1(self, lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path):\n lime_all = lime_all\n lime_n = lime_n\n ubiq = ubiq\n pheno = pheno\n exp_data = exp_data\n cli = cli\n save_path = save_path + 'Fig1/'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n r = robjects.r\n r.source('./web_app/script/Conversion_Difference.r')\n r.source('./web_app/script/Single.r')\n r.source('./web_app/script/Survival_.r')\n r.source('./web_app/script/RelatedBubbles.r')\n difference = r.Difference(lime_all, lime_n, ubiq, pheno, exp_data,\n save_path)\n single = r.SingleFactor(cli, exp_data, difference[0], save_path)\n survival = r.Survival_(single[0], single[1], difference[0], pheno,\n cli, save_path)\n bubble = r.RelatedBubbles(survival[0], cli, save_path)\n word_img(single[1], save_path)\n result = {'code': 1, 'difference': [i for i in difference],\n 'single': [i for i in single], 'survival': [i for i in survival\n ], 'bubble': [i for i in bubble]}\n return result\n <mask token>\n\n def epath(self):\n return self.save\n\n def progress(self):\n return 1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Ubiquitination:\n\n def __init__(self, disease, path):\n self.disease = disease\n path = path\n self.num = 11\n self.datas_path = self.data_path(self.disease)\n self.save = self.save_path(path, self.disease)\n\n def load_R(self):\n pass\n\n def data_path(self, name):\n exp_path = './web_app/data/disease/exp_data/{}.txt'.format(name)\n clinical_path = './web_app/data/disease/clinical/{}.txt'.format(name)\n ubiquitina_path = './web_app/data/data/ubiq/UbiqGene.txt'\n return exp_path, clinical_path, ubiquitina_path\n\n def save_path(self, path, disease):\n path = path\n disease = disease\n sp = path + '/Ubiquitination/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n sp = sp + disease + '/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n return sp\n\n def analysis(self, data, name, save_path):\n data_path = data\n name = name\n save_path = save_path\n lime_all = './web_app/data/Difference/{}/limma_DEG_all.csv'.format(name\n )\n lime_n = './web_app/data/Difference/{}/limma_DEG_0.05.csv'.format(name)\n ubiq = './web_app/data/data/ubiq/UbiqGene.txt'\n pheno = './web_app/data/Difference/{}/pheno.csv'.format(name)\n exp_data = './web_app/data/disease/exp_data/{}.csv'.format(name)\n cli = './web_app/data/disease/clinical/{}.csv'.format(name)\n return lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path\n fig1_result = self.fig1(lime_all, lime_n, ubiq, pheno, exp_data,\n cli, usa, save_path)\n\n def fig1(self, lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path):\n lime_all = lime_all\n lime_n = lime_n\n ubiq = ubiq\n pheno = pheno\n exp_data = exp_data\n cli = cli\n save_path = save_path + 'Fig1/'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n r = robjects.r\n r.source('./web_app/script/Conversion_Difference.r')\n r.source('./web_app/script/Single.r')\n r.source('./web_app/script/Survival_.r')\n r.source('./web_app/script/RelatedBubbles.r')\n difference = r.Difference(lime_all, lime_n, ubiq, pheno, exp_data,\n save_path)\n single = r.SingleFactor(cli, exp_data, difference[0], save_path)\n survival = r.Survival_(single[0], single[1], difference[0], pheno,\n cli, save_path)\n bubble = r.RelatedBubbles(survival[0], cli, save_path)\n word_img(single[1], save_path)\n result = {'code': 1, 'difference': [i for i in difference],\n 'single': [i for i in single], 'survival': [i for i in survival\n ], 'bubble': [i for i in bubble]}\n return result\n\n def fig2(self, save_path):\n r = robjects.r\n r.source('./web_app/script/GeneSurvivalModel/Heatmap.r')\n result = {'code': 2}\n return result\n\n def epath(self):\n return self.save\n\n def progress(self):\n return 1\n\n\n<mask token>\n",
"step-4": "import rpy2.robjects as robjects\nfrom rpy2.robjects.packages import importr\nimport sys\nsys.path.append('./')\nimport importlib\nimport json\nimport os\nfrom web_app.function.WordCould import word_img\n\n\nclass Ubiquitination:\n\n def __init__(self, disease, path):\n self.disease = disease\n path = path\n self.num = 11\n self.datas_path = self.data_path(self.disease)\n self.save = self.save_path(path, self.disease)\n\n def load_R(self):\n pass\n\n def data_path(self, name):\n exp_path = './web_app/data/disease/exp_data/{}.txt'.format(name)\n clinical_path = './web_app/data/disease/clinical/{}.txt'.format(name)\n ubiquitina_path = './web_app/data/data/ubiq/UbiqGene.txt'\n return exp_path, clinical_path, ubiquitina_path\n\n def save_path(self, path, disease):\n path = path\n disease = disease\n sp = path + '/Ubiquitination/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n sp = sp + disease + '/'\n if not os.path.exists(sp):\n os.makedirs(sp)\n return sp\n\n def analysis(self, data, name, save_path):\n data_path = data\n name = name\n save_path = save_path\n lime_all = './web_app/data/Difference/{}/limma_DEG_all.csv'.format(name\n )\n lime_n = './web_app/data/Difference/{}/limma_DEG_0.05.csv'.format(name)\n ubiq = './web_app/data/data/ubiq/UbiqGene.txt'\n pheno = './web_app/data/Difference/{}/pheno.csv'.format(name)\n exp_data = './web_app/data/disease/exp_data/{}.csv'.format(name)\n cli = './web_app/data/disease/clinical/{}.csv'.format(name)\n return lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path\n fig1_result = self.fig1(lime_all, lime_n, ubiq, pheno, exp_data,\n cli, usa, save_path)\n\n def fig1(self, lime_all, lime_n, ubiq, pheno, exp_data, cli, save_path):\n lime_all = lime_all\n lime_n = lime_n\n ubiq = ubiq\n pheno = pheno\n exp_data = exp_data\n cli = cli\n save_path = save_path + 'Fig1/'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n r = robjects.r\n r.source('./web_app/script/Conversion_Difference.r')\n r.source('./web_app/script/Single.r')\n r.source('./web_app/script/Survival_.r')\n r.source('./web_app/script/RelatedBubbles.r')\n difference = r.Difference(lime_all, lime_n, ubiq, pheno, exp_data,\n save_path)\n single = r.SingleFactor(cli, exp_data, difference[0], save_path)\n survival = r.Survival_(single[0], single[1], difference[0], pheno,\n cli, save_path)\n bubble = r.RelatedBubbles(survival[0], cli, save_path)\n word_img(single[1], save_path)\n result = {'code': 1, 'difference': [i for i in difference],\n 'single': [i for i in single], 'survival': [i for i in survival\n ], 'bubble': [i for i in bubble]}\n return result\n\n def fig2(self, save_path):\n r = robjects.r\n r.source('./web_app/script/GeneSurvivalModel/Heatmap.r')\n result = {'code': 2}\n return result\n\n def epath(self):\n return self.save\n\n def progress(self):\n return 1\n\n\nif __name__ == '__main__':\n a = Ubiquitination('TCGA-LIHC', './web_app/temp/Arsz')\n x = a.analysis(a.datas_path, a.disease, a.save)\n f1 = a.fig1(x[0], x[1], x[2], x[3], x[4], x[5], x[6])\n",
"step-5": "import rpy2.robjects as robjects\nfrom rpy2.robjects.packages import importr\n# print(robjects.__file__)\nimport sys\nsys.path.append('./')\nimport importlib\nimport json\nimport os\nfrom web_app.function.WordCould import word_img\n# importlib.reload(sys)\n# #sys.setdefaultencoding('gbk')\n\n\nclass Ubiquitination():\n\n def __init__(self,disease,path):\n\n self.disease=disease\n path=path\n self.num=11\n # print('泛素化',self.disease,path)\n self.datas_path=self.data_path(self.disease)\n self.save=self.save_path(path,self.disease)\n\n # self.analysis(self.datas_path,self.disease,self.save)\n\n def load_R(self):\n pass\n\n def data_path(self,name):\n\n exp_path='./web_app/data/disease/exp_data/{}.txt'.format(name)\n clinical_path='./web_app/data/disease/clinical/{}.txt'.format(name)\n ubiquitina_path='./web_app/data/data/ubiq/UbiqGene.txt'\n # print(exp_path)\n return (exp_path,clinical_path,ubiquitina_path)\n\n def save_path(self,path,disease):\n path=path\n disease=disease\n\n sp=path+'/Ubiquitination/'\n\n if not os.path.exists(sp):\n os.makedirs(sp)\n \n sp=sp+disease+'/'\n \n if not os.path.exists(sp):\n os.makedirs(sp)\n \n # print(sp)\n return sp\n\n def analysis(self,data,name,save_path):\n \n data_path=data\n name=name\n save_path=save_path\n # print(data_path[0],'TCGA-BRCA',save_path)\n\n lime_all='./web_app/data/Difference/{}/limma_DEG_all.csv'.format(name)\n lime_n='./web_app/data/Difference/{}/limma_DEG_0.05.csv'.format(name)\n ubiq='./web_app/data/data/ubiq/UbiqGene.txt'\n pheno='./web_app/data/Difference/{}/pheno.csv'.format(name)\n exp_data='./web_app/data/disease/exp_data/{}.csv'.format(name)\n cli='./web_app/data/disease/clinical/{}.csv'.format(name)\n\n return (lime_all,lime_n,ubiq,pheno,exp_data,cli,save_path)\n\n fig1_result=self.fig1(lime_all,lime_n,ubiq,pheno,exp_data,cli,usa,save_path)\n\n \n \n\n\n # print(multiple[0])\n # print(single[0],single[1])\n\n def fig1(self,lime_all,lime_n,ubiq,pheno,exp_data,cli,save_path):\n\n lime_all=lime_all\n lime_n=lime_n\n ubiq=ubiq\n pheno=pheno\n exp_data=exp_data\n cli=cli\n save_path=save_path+'Fig1/'\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n r=robjects.r\n # 加载差异分析文件\n r.source('./web_app/script/Conversion_Difference.r')\n r.source('./web_app/script/Single.r')\n r.source('./web_app/script/Survival_.r')\n r.source('./web_app/script/RelatedBubbles.r')\n # 调用差异分析函数完成差异分析\n # 构建差异基因,绘制差异火山图,热图\n difference=r.Difference(lime_all,lime_n,ubiq,pheno,exp_data,save_path)\n # print(difference[0],difference[1])\n\n # 单多因素分析\n single=r.SingleFactor(cli,exp_data,difference[0],save_path)\n # # print([i for i in single])\n \n\n survival=r.Survival_(single[0],single[1],difference[0],pheno,cli,save_path)\n # survival=r.Survival_(single[0],single[1],difference[0],pheno,save_path)\n # # # print([i for i in survival])\n # # # # 相关性气泡图\n \n bubble=r.RelatedBubbles(survival[0],cli,save_path)\n\n word_img(single[1],save_path)\n # print([i for i in bubble])\n\n result={\n 'code':1,\n 'difference':[i for i in difference],\n 'single':[i for i in single],\n 'survival':[i for i in survival],\n 'bubble':[i for i in bubble],\n }\n return result\n\n def fig2(self,save_path):\n # save_path=save_path+'Fig2/'\n\n # if not os.path.exists(save_path):\n # os.makedirs(save_path)\n\n r=robjects.r\n # 加载差异分析文件\n r.source('./web_app/script/GeneSurvivalModel/Heatmap.r')\n\n result={\n 'code':2,\n \n }\n return result\n # 条带热图\n # r.source('./web_app/script/Heatmap.r')\n # 
r.Heatmap('./web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/train.csv',\n # \"./web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/Cox_genes_OS_pValue.csv\",\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/')\n\n # Lasso折线图\n # r.source('./web_app/script/LineLasso.r')\n # r.LineLasso(\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/train.csv',\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/CoxSingle_train.csv',\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/'\n # )\n # # 发散曲线\n # r.source('./web_app/script/CurveLasso.r')\n # r.CurveLasso('./web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/train.csv',\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/signature_gene.txt',\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/')\n\n # # 随机生存森林\n # r.source('./web_app/script/RandomSurvivalForest.r')\n # r.Rsf('./web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/train.csv',\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/signature_gene.txt',\n # './web_app/temp/Arsz/Ubiquitination/TCGA-BRCA/')\n\n def epath(self):\n return self.save\n def progress(self):\n\n return 1\nif __name__ == \"__main__\":\n a=Ubiquitination('TCGA-LIHC','./web_app/temp/Arsz')\n x=a.analysis(a.datas_path,a.disease,a.save)\n f1=a.fig1(x[0],x[1],x[2],x[3],x[4],x[5],x[6])\n",
"step-ids": [
7,
8,
10,
12,
13
]
}
|
[
7,
8,
10,
12,
13
] |
# coding=utf-8
"""
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
Example
Given a binary tree as follow:
1
/ \
2 3
/ \
4 5
The maximum depth is 3.
"""
"""
Definition of TreeNode:
"""
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param root: The root of binary tree.
@return: An integer
"""
def maxDepth(self, root):
# write your code here
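        # depth of a node = 1 + the deeper of its two subtrees; an empty tree has depth 0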
if not root:
return 0
        return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
|
normal
|
{
"blob_id": "262d6722f4c158d0a41b22433792cdc35651d156",
"index": 9459,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: An integer\n \"\"\"\n\n def maxDepth(self, root):\n if not root:\n return 0\n return max(self.maximum(root.left), self.maximum(root.right)) + 1\n",
"step-3": "<mask token>\n\n\nclass TreeNode:\n <mask token>\n\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: An integer\n \"\"\"\n\n def maxDepth(self, root):\n if not root:\n return 0\n return max(self.maximum(root.left), self.maximum(root.right)) + 1\n",
"step-4": "<mask token>\n\n\nclass TreeNode:\n\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: An integer\n \"\"\"\n\n def maxDepth(self, root):\n if not root:\n return 0\n return max(self.maximum(root.left), self.maximum(root.right)) + 1\n",
"step-5": "# coding=utf-8\n\n\"\"\"\nGiven a binary tree, find its maximum depth.\nThe maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.\n\nExample\nGiven a binary tree as follow:\n\n 1\n / \\ \n2 3\n / \\\n 4 5\nThe maximum depth is 3.\n\n\"\"\"\n\n\"\"\"\nDefinition of TreeNode:\n\"\"\"\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: An integer\n \"\"\" \n def maxDepth(self, root):\n # write your code here\n if not root:\n \treturn 0\n return max(self.maximum(root.left),self.maximum(root.right))+1",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# CSE 415 Winter 2019
# Assignment 1
# Jichun Li 1531264
# Part A
# 1
def five_x_cubed_plus_1(x):
return 5 * (x ** 3) + 1
#2
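# Splits a list into consecutive pairs; a trailing unpaired element is kept as a one-element list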
def pair_off(ary):
result = []
for i in range(0, int(len(ary) / 2 * 2), 2):
result.append([ary[i], ary[i + 1]])
if (int (len(ary) % 2) == 1):
result.append([ary[-1]])
return result
#3
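# Caesar-style cipher: every letter is shifted by +19 (equivalently -7) and its case is flipped; other characters pass through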
def mystery_code(input_string):
result = ''
for c in input_string:
next_char = c
if str.isalpha(c):
if c.upper() < 'H':
if c.islower():
next_char = chr(ord(c) + 19).upper()
else:
next_char = chr(ord(c) + 19).lower()
else:
if c.islower():
next_char = chr(ord(c) - 7).upper()
else:
next_char = chr(ord(c) - 7).lower()
result = result + next_char
return result
#4
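# Naive past-tense builder: a few irregular verbs, verbs ending in "e" get "d", consonant+"y" becomes "ied",
# a final consonant-vowel-consonant doubles the last consonant, everything else just gets "ed"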
def past_tense(words):
result = []
irregular_dict = {'have':'had',
'be':'was',
'eat':'ate',
'go':'went'}
for word in words:
word = str.lower(word)
if word in irregular_dict.keys():
result.append(irregular_dict[word])
        elif word[-1] == 'e':
result.append(word + 'd')
elif word[-1] is 'y' and word[-2] not in 'aeiou':
result.append(word[:-1] + 'ied')
elif word[-2] in 'aeiou' and word[-1] not in 'aeiouwy' and word[-3] not in 'aeiou':
result.append(word + word[-1] + 'ed')
else:
result.append(word + 'ed')
return result
|
normal
|
{
"blob_id": "681788ffe7672458e8d334316aa87936746352b1",
"index": 4054,
"step-1": "def five_x_cubed_plus_1(x):\n return 5 * x ** 3 + 1\n\n\n<mask token>\n",
"step-2": "def five_x_cubed_plus_1(x):\n return 5 * x ** 3 + 1\n\n\ndef pair_off(ary):\n result = []\n for i in range(0, int(len(ary) / 2 * 2), 2):\n result.append([ary[i], ary[i + 1]])\n if int(len(ary) % 2) == 1:\n result.append([ary[-1]])\n return result\n\n\n<mask token>\n",
"step-3": "def five_x_cubed_plus_1(x):\n return 5 * x ** 3 + 1\n\n\ndef pair_off(ary):\n result = []\n for i in range(0, int(len(ary) / 2 * 2), 2):\n result.append([ary[i], ary[i + 1]])\n if int(len(ary) % 2) == 1:\n result.append([ary[-1]])\n return result\n\n\ndef mystery_code(input_string):\n result = ''\n for c in input_string:\n next_char = c\n if str.isalpha(c):\n if c.upper() < 'H':\n if c.islower():\n next_char = chr(ord(c) + 19).upper()\n else:\n next_char = chr(ord(c) + 19).lower()\n elif c.islower():\n next_char = chr(ord(c) - 7).upper()\n else:\n next_char = chr(ord(c) - 7).lower()\n result = result + next_char\n return result\n\n\n<mask token>\n",
"step-4": "def five_x_cubed_plus_1(x):\n return 5 * x ** 3 + 1\n\n\ndef pair_off(ary):\n result = []\n for i in range(0, int(len(ary) / 2 * 2), 2):\n result.append([ary[i], ary[i + 1]])\n if int(len(ary) % 2) == 1:\n result.append([ary[-1]])\n return result\n\n\ndef mystery_code(input_string):\n result = ''\n for c in input_string:\n next_char = c\n if str.isalpha(c):\n if c.upper() < 'H':\n if c.islower():\n next_char = chr(ord(c) + 19).upper()\n else:\n next_char = chr(ord(c) + 19).lower()\n elif c.islower():\n next_char = chr(ord(c) - 7).upper()\n else:\n next_char = chr(ord(c) - 7).lower()\n result = result + next_char\n return result\n\n\ndef past_tense(words):\n result = []\n irregular_dict = {'have': 'had', 'be': 'was', 'eat': 'ate', 'go': 'went'}\n for word in words:\n word = str.lower(word)\n if word in irregular_dict.keys():\n result.append(irregular_dict[word])\n elif word[-1] is 'e':\n result.append(word + 'd')\n elif word[-1] is 'y' and word[-2] not in 'aeiou':\n result.append(word[:-1] + 'ied')\n elif word[-2] in 'aeiou' and word[-1] not in 'aeiouwy' and word[-3\n ] not in 'aeiou':\n result.append(word + word[-1] + 'ed')\n else:\n result.append(word + 'ed')\n return result\n",
"step-5": "# CSE 415 Winter 2019\n# Assignment 1\n# Jichun Li 1531264\n\n# Part A\n# 1\ndef five_x_cubed_plus_1(x):\n\treturn 5 * (x ** 3) + 1\n\n#2\ndef pair_off(ary):\n result = []\n \n for i in range(0, int(len(ary) / 2 * 2), 2):\n result.append([ary[i], ary[i + 1]])\n if (int (len(ary) % 2) == 1):\n result.append([ary[-1]])\n return result\n\n#3\ndef mystery_code(input_string):\n\tresult = ''\n\tfor c in input_string:\n\t\tnext_char = c\n\t\tif str.isalpha(c):\n\t\t\tif c.upper() < 'H':\n\t\t\t\tif c.islower():\n\t\t\t\t\tnext_char = chr(ord(c) + 19).upper()\n\t\t\t\telse:\n\t\t\t\t\tnext_char = chr(ord(c) + 19).lower()\n\t\t\telse:\n\t\t\t\tif c.islower():\n\t\t\t\t\tnext_char = chr(ord(c) - 7).upper()\n\t\t\t\telse:\n\t\t\t\t\tnext_char = chr(ord(c) - 7).lower()\n\t\tresult = result + next_char\n\treturn result\n\n#4\ndef past_tense(words):\n\tresult = []\n\tirregular_dict = {'have':'had',\n\t\t\t 'be':'was',\n\t\t\t 'eat':'ate',\n\t\t\t 'go':'went'}\n\tfor word in words:\n\t\tword = str.lower(word)\n\t\tif word in irregular_dict.keys():\n\t\t\tresult.append(irregular_dict[word])\n\t\telif word[-1] is 'e':\n\t\t\tresult.append(word + 'd')\n\t\telif word[-1] is 'y' and word[-2] not in 'aeiou':\n\t\t\tresult.append(word[:-1] + 'ied')\n\t\telif word[-2] in 'aeiou' and word[-1] not in 'aeiouwy' and word[-3] not in 'aeiou':\n\t\t\tresult.append(word + word[-1] + 'ed')\n\t\telse:\n\t\t\tresult.append(word + 'ed')\n\treturn result\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
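Illustrative calls against the four assignment helpers above (not part of the original record). Two quirks of the code as written are worth noting: under Python 3, `int(len(ary) / 2 * 2)` in `pair_off` equals `len(ary)` for odd-length input, so `ary[i + 1]` overruns and only even-length lists are safe; and the `word[-1] is 'e'` style comparisons in `past_tense` rely on CPython interning single-character strings.

print(five_x_cubed_plus_1(2))             # 41
print(pair_off([1, 2, 3, 4]))             # [[1, 2], [3, 4]]  (even length avoids the overrun)
print(mystery_code("Zebra"))              # 'sXUKT' - case-flipped Caesar-style shift
print(past_tense(["walk", "go", "try"]))  # ['walked', 'went', 'tried']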
import unittest
from collections import Counter
class Solution(object):
def findOriginalArray(self, changed):
"""
:type changed: List[int]
:rtype: List[int]
"""
n = len(changed)
if n % 2 != 0:
return []
freq = Counter(changed)
changed.sort()
ans = []
for num in changed:
if num in freq and freq[num] > 0:
freq[num] -= 1
double_num = 2 * num
if double_num in freq and freq[double_num] > 0:
ans.append(num)
freq[double_num] -= 1
else:
return []
return ans
class TestSolution(unittest.TestCase):
def test_findOriginalArray(self):
solution = Solution()
self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,
3, 4])
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "d5acda0d5d066d381a7f6310eb4fe6280d7e84de",
"index": 5309,
"step-1": "<mask token>\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_findOriginalArray(self):\n solution = Solution()\n self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,\n 3, 4])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_findOriginalArray(self):\n solution = Solution()\n self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,\n 3, 4])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def findOriginalArray(self, changed):\n \"\"\"\n :type changed: List[int]\n :rtype: List[int]\n \"\"\"\n n = len(changed)\n if n % 2 != 0:\n return []\n freq = Counter(changed)\n changed.sort()\n ans = []\n for num in changed:\n if num in freq and freq[num] > 0:\n freq[num] -= 1\n double_num = 2 * num\n if double_num in freq and freq[double_num] > 0:\n ans.append(num)\n freq[double_num] -= 1\n else:\n return []\n return ans\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_findOriginalArray(self):\n solution = Solution()\n self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,\n 3, 4])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom collections import Counter\n\n\nclass Solution(object):\n\n def findOriginalArray(self, changed):\n \"\"\"\n :type changed: List[int]\n :rtype: List[int]\n \"\"\"\n n = len(changed)\n if n % 2 != 0:\n return []\n freq = Counter(changed)\n changed.sort()\n ans = []\n for num in changed:\n if num in freq and freq[num] > 0:\n freq[num] -= 1\n double_num = 2 * num\n if double_num in freq and freq[double_num] > 0:\n ans.append(num)\n freq[double_num] -= 1\n else:\n return []\n return ans\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_findOriginalArray(self):\n solution = Solution()\n self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,\n 3, 4])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
2,
3,
5,
6
]
}
|
[
2,
3,
5,
6
] |
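A few extra spot-checks for the greedy doubled-array reconstruction above; they reuse the `Solution` class from this record, and the expected values follow directly from the code as written.

s = Solution()
print(s.findOriginalArray([1, 2, 4]))  # []  - the leftover 4 has no matching 8
print(s.findOriginalArray([0, 0]))     # [0] - zero doubles to itself
print(s.findOriginalArray([1, 2, 3]))  # []  - odd length is rejected up front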
import copy
from typing import List, Optional, Tuple, NamedTuple, Union, Callable
import torch
from torch import Tensor
from torch_sparse import SparseTensor
import time
import torch_quiver as qv
from torch.distributed import rpc
def subgraph_nodes_n(nodes, i):
row, col, edge_index = None, None, None
return row, col, edge_index
class Comm:
def __init__(self, rank, world_size):
self.rank = rank
self.world_size = world_size
subgraph_nodes = subgraph_nodes_n
class EdgeIndex(NamedTuple):
edge_index: Tensor
e_id: Optional[Tensor]
size: Tuple[int, int]
def to(self, *args, **kwargs):
edge_index = self.edge_index.to(*args, **kwargs)
e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None
return EdgeIndex(edge_index, e_id, self.size)
class Adj(NamedTuple):
adj_t: SparseTensor
e_id: Optional[Tensor]
size: Tuple[int, int]
def to(self, *args, **kwargs):
adj_t = self.adj_t.to(*args, **kwargs)
e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None
return Adj(adj_t, e_id, self.size)
class RandomIndexSampler(torch.utils.data.Sampler):
def __init__(self, num_nodes: int, num_parts: int, shuffle: bool = False):
self.N = num_nodes
self.num_parts = num_parts
self.shuffle = shuffle
self.n_ids = self.get_node_indices()
def get_node_indices(self):
n_id = torch.randint(self.num_parts, (self.N, ), dtype=torch.long)
n_ids = [(n_id == i).nonzero(as_tuple=False).view(-1)
for i in range(self.num_parts)]
return n_ids
def __iter__(self):
if self.shuffle:
self.n_ids = self.get_node_indices()
return iter(self.n_ids)
def __len__(self):
return self.num_parts
class distributeCudaRandomNodeSampler(torch.utils.data.DataLoader):
r"""A data loader that randomly samples nodes within a graph and returns
their induced subgraph.
.. note::
For an example of using :obj:`RandomNodeSampler`, see
`examples/ogbn_proteins_deepgcn.py
<https://github.com/rusty1s/pytorch_geometric/blob/master/examples/
ogbn_proteins_deepgcn.py>`_.
Args:
data (torch_geometric.data.Data): The graph data object.
num_parts (int): The number of partitions.
shuffle (bool, optional): If set to :obj:`True`, the data is reshuffled
at every epoch (default: :obj:`False`).
**kwargs (optional): Additional arguments of
:class:`torch.utils.data.DataLoader`, such as :obj:`num_workers`.
"""
def __init__(self, comm,
graph,
feature_func,
device,
num_parts: int,
shuffle: bool = False,
**kwargs):
self.comm = comm
data, local2global, global2local, node2rank = graph
self.local2global = local2global
self.global2local = global2local
self.node2rank = node2rank
self.node_feature = feature_func
self.cuda_device = torch.device('cuda:' + str(device))
assert data.edge_index is not None
self.N = N = data.num_nodes
self.E = data.num_edges
self.adj = SparseTensor(
row=data.edge_index[0], col=data.edge_index[1],
value=torch.arange(self.E, device=data.edge_index.device),
sparse_sizes=(N, N)).to(self.cuda_device)
self.data = copy.copy(data)
self.data.edge_index = None
super(distributeCudaRandomNodeSampler, self).__init__(
self, batch_size=1,
sampler=RandomIndexSampler(self.N, num_parts, shuffle),
collate_fn=self.__collate__, **kwargs)
self.deg_out = self.adj.storage.rowcount()
def __getitem__(self, idx):
return idx
def __cuda_saint_subgraph__(
self, node_idx: torch.Tensor) -> Tuple[SparseTensor, torch.Tensor]:
rows = []
cols = []
edge_indices = []
# splite node idx
ranks = self.node2rank(node_idx)
local_nodes = None
futures = []
adj_row, adj_col, adj_value = self.adj.coo()
adj_rowptr = self.adj.storage.rowptr()
cpu = torch.device('cpu')
for i in range(self.comm.world_size):
# for every device check how many nodes on the device
mask = torch.eq(ranks, i)
part_nodes = torch.masked_select(node_idx, mask)
# nodes as the the current, pointer ordered inputs to accumulate the partial nodes
if part_nodes.size(0) >= 1:
# if current server then local
if i == self.comm.rank:
local_nodes = part_nodes
futures.append((torch.LongTensor([]), torch.LongTensor([]), torch.LongTensor([])))
# remote server
else:
futures.append(
rpc.rpc_async(f"worker{i}",
subgraph_nodes,
args=(part_nodes, 1),
kwargs=None,
timeout=-1.0))
else:
futures.append((torch.LongTensor([]), torch.LongTensor([]), torch.LongTensor([])))
# local server has nodes
if local_nodes is not None:
nodes = self.global2local(local_nodes)
nodes = nodes.to(self.cuda_device)
deg = torch.index_select(self.deg_out, 0, nodes)
row, col, edge_index = qv.saint_subgraph(nodes, adj_rowptr, adj_row, adj_col, deg)
row = row.to(cpu)
col = col.to(cpu)
edge_index = edge_index.to(cpu)
futures[self.comm.rank] = row, col, edge_index
for i in range(len(futures)):
if not isinstance(futures[i], tuple):
futures[i] = futures[i].wait()
row, col, edge_index = futures[i]
rows.append(row)
cols.append(col)
edge_indices.append(edge_index)
ret_row = torch.cat(rows)
ret_cols = torch.cat(cols)
ret_edgeindex = torch.cat(edge_indices)
if adj_value is not None:
ret_vals = adj_value[ret_edgeindex].to(cpu)
out = SparseTensor(row = ret_row,
rowptr = None,
col= ret_cols,
value = ret_vals,
sparse_sizes=(node_idx.size(0), node_idx.size(0)),
is_sorted=False)
return out, ret_edgeindex
def __collate__(self, node_idx):
node_idx = node_idx[0]
data = self.data.__class__()
data.num_nodes = node_idx.size(0)
node_idx = node_idx.unique()
adj, _ = self.__cuda_saint_subgraph__(node_idx)
row, col, edge_idx = adj.coo()
data.edge_index = torch.stack([row, col], dim=0)
data.node_idx = node_idx
data.train_mask = self.data.train_mask[node_idx]
for key, item in self.data:
if isinstance(item, Tensor) and item.size(0) == self.N:
data[key] = item[node_idx]
elif isinstance(item, Tensor) and item.size(0) == self.E:
data[key] = item[edge_idx]
else:
data[key] = item
return data
|
normal
|
{
"blob_id": "3f4f396d1d18611e0248a08b42328422ca4b8146",
"index": 4766,
"step-1": "<mask token>\n\n\nclass Adj(NamedTuple):\n adj_t: SparseTensor\n e_id: Optional[Tensor]\n size: Tuple[int, int]\n <mask token>\n\n\nclass RandomIndexSampler(torch.utils.data.Sampler):\n\n def __init__(self, num_nodes: int, num_parts: int, shuffle: bool=False):\n self.N = num_nodes\n self.num_parts = num_parts\n self.shuffle = shuffle\n self.n_ids = self.get_node_indices()\n\n def get_node_indices(self):\n n_id = torch.randint(self.num_parts, (self.N,), dtype=torch.long)\n n_ids = [(n_id == i).nonzero(as_tuple=False).view(-1) for i in\n range(self.num_parts)]\n return n_ids\n\n def __iter__(self):\n if self.shuffle:\n self.n_ids = self.get_node_indices()\n return iter(self.n_ids)\n\n def __len__(self):\n return self.num_parts\n\n\nclass distributeCudaRandomNodeSampler(torch.utils.data.DataLoader):\n \"\"\"A data loader that randomly samples nodes within a graph and returns\n their induced subgraph.\n\n .. note::\n\n For an example of using :obj:`RandomNodeSampler`, see\n `examples/ogbn_proteins_deepgcn.py\n <https://github.com/rusty1s/pytorch_geometric/blob/master/examples/\n ogbn_proteins_deepgcn.py>`_.\n\n Args:\n data (torch_geometric.data.Data): The graph data object.\n num_parts (int): The number of partitions.\n shuffle (bool, optional): If set to :obj:`True`, the data is reshuffled\n at every epoch (default: :obj:`False`).\n **kwargs (optional): Additional arguments of\n :class:`torch.utils.data.DataLoader`, such as :obj:`num_workers`.\n \"\"\"\n\n def __init__(self, comm, graph, feature_func, device, num_parts: int,\n shuffle: bool=False, **kwargs):\n self.comm = comm\n data, local2global, global2local, node2rank = graph\n self.local2global = local2global\n self.global2local = global2local\n self.node2rank = node2rank\n self.node_feature = feature_func\n self.cuda_device = torch.device('cuda:' + str(device))\n assert data.edge_index is not None\n self.N = N = data.num_nodes\n self.E = data.num_edges\n self.adj = SparseTensor(row=data.edge_index[0], col=data.edge_index\n [1], value=torch.arange(self.E, device=data.edge_index.device),\n sparse_sizes=(N, N)).to(self.cuda_device)\n self.data = copy.copy(data)\n self.data.edge_index = None\n super(distributeCudaRandomNodeSampler, self).__init__(self,\n batch_size=1, sampler=RandomIndexSampler(self.N, num_parts,\n shuffle), collate_fn=self.__collate__, **kwargs)\n self.deg_out = self.adj.storage.rowcount()\n\n def __getitem__(self, idx):\n return idx\n\n def __cuda_saint_subgraph__(self, node_idx: torch.Tensor) ->Tuple[\n SparseTensor, torch.Tensor]:\n rows = []\n cols = []\n edge_indices = []\n ranks = self.node2rank(node_idx)\n local_nodes = None\n futures = []\n adj_row, adj_col, adj_value = self.adj.coo()\n adj_rowptr = self.adj.storage.rowptr()\n cpu = torch.device('cpu')\n for i in range(self.comm.world_size):\n mask = torch.eq(ranks, i)\n part_nodes = torch.masked_select(node_idx, mask)\n if part_nodes.size(0) >= 1:\n if i == self.comm.rank:\n local_nodes = part_nodes\n futures.append((torch.LongTensor([]), torch.LongTensor(\n []), torch.LongTensor([])))\n else:\n futures.append(rpc.rpc_async(f'worker{i}',\n subgraph_nodes, args=(part_nodes, 1), kwargs=None,\n timeout=-1.0))\n else:\n futures.append((torch.LongTensor([]), torch.LongTensor([]),\n torch.LongTensor([])))\n if local_nodes is not None:\n nodes = self.global2local(local_nodes)\n nodes = nodes.to(self.cuda_device)\n deg = torch.index_select(self.deg_out, 0, nodes)\n row, col, edge_index = qv.saint_subgraph(nodes, adj_rowptr,\n adj_row, adj_col, deg)\n row = 
row.to(cpu)\n col = col.to(cpu)\n edge_index = edge_index.to(cpu)\n futures[self.comm.rank] = row, col, edge_index\n for i in range(len(futures)):\n if not isinstance(futures[i], tuple):\n futures[i] = futures[i].wait()\n row, col, edge_index = futures[i]\n rows.append(row)\n cols.append(col)\n edge_indices.append(edge_index)\n ret_row = torch.cat(rows)\n ret_cols = torch.cat(cols)\n ret_edgeindex = torch.cat(edge_indices)\n if adj_value is not None:\n ret_vals = adj_value[ret_edgeindex].to(cpu)\n out = SparseTensor(row=ret_row, rowptr=None, col=ret_cols, value=\n ret_vals, sparse_sizes=(node_idx.size(0), node_idx.size(0)),\n is_sorted=False)\n return out, ret_edgeindex\n\n def __collate__(self, node_idx):\n node_idx = node_idx[0]\n data = self.data.__class__()\n data.num_nodes = node_idx.size(0)\n node_idx = node_idx.unique()\n adj, _ = self.__cuda_saint_subgraph__(node_idx)\n row, col, edge_idx = adj.coo()\n data.edge_index = torch.stack([row, col], dim=0)\n data.node_idx = node_idx\n data.train_mask = self.data.train_mask[node_idx]\n for key, item in self.data:\n if isinstance(item, Tensor) and item.size(0) == self.N:\n data[key] = item[node_idx]\n elif isinstance(item, Tensor) and item.size(0) == self.E:\n data[key] = item[edge_idx]\n else:\n data[key] = item\n return data\n",
"step-2": "<mask token>\n\n\nclass EdgeIndex(NamedTuple):\n edge_index: Tensor\n e_id: Optional[Tensor]\n size: Tuple[int, int]\n\n def to(self, *args, **kwargs):\n edge_index = self.edge_index.to(*args, **kwargs)\n e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None\n return EdgeIndex(edge_index, e_id, self.size)\n\n\nclass Adj(NamedTuple):\n adj_t: SparseTensor\n e_id: Optional[Tensor]\n size: Tuple[int, int]\n\n def to(self, *args, **kwargs):\n adj_t = self.adj_t.to(*args, **kwargs)\n e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None\n return Adj(adj_t, e_id, self.size)\n\n\nclass RandomIndexSampler(torch.utils.data.Sampler):\n\n def __init__(self, num_nodes: int, num_parts: int, shuffle: bool=False):\n self.N = num_nodes\n self.num_parts = num_parts\n self.shuffle = shuffle\n self.n_ids = self.get_node_indices()\n\n def get_node_indices(self):\n n_id = torch.randint(self.num_parts, (self.N,), dtype=torch.long)\n n_ids = [(n_id == i).nonzero(as_tuple=False).view(-1) for i in\n range(self.num_parts)]\n return n_ids\n\n def __iter__(self):\n if self.shuffle:\n self.n_ids = self.get_node_indices()\n return iter(self.n_ids)\n\n def __len__(self):\n return self.num_parts\n\n\nclass distributeCudaRandomNodeSampler(torch.utils.data.DataLoader):\n \"\"\"A data loader that randomly samples nodes within a graph and returns\n their induced subgraph.\n\n .. note::\n\n For an example of using :obj:`RandomNodeSampler`, see\n `examples/ogbn_proteins_deepgcn.py\n <https://github.com/rusty1s/pytorch_geometric/blob/master/examples/\n ogbn_proteins_deepgcn.py>`_.\n\n Args:\n data (torch_geometric.data.Data): The graph data object.\n num_parts (int): The number of partitions.\n shuffle (bool, optional): If set to :obj:`True`, the data is reshuffled\n at every epoch (default: :obj:`False`).\n **kwargs (optional): Additional arguments of\n :class:`torch.utils.data.DataLoader`, such as :obj:`num_workers`.\n \"\"\"\n\n def __init__(self, comm, graph, feature_func, device, num_parts: int,\n shuffle: bool=False, **kwargs):\n self.comm = comm\n data, local2global, global2local, node2rank = graph\n self.local2global = local2global\n self.global2local = global2local\n self.node2rank = node2rank\n self.node_feature = feature_func\n self.cuda_device = torch.device('cuda:' + str(device))\n assert data.edge_index is not None\n self.N = N = data.num_nodes\n self.E = data.num_edges\n self.adj = SparseTensor(row=data.edge_index[0], col=data.edge_index\n [1], value=torch.arange(self.E, device=data.edge_index.device),\n sparse_sizes=(N, N)).to(self.cuda_device)\n self.data = copy.copy(data)\n self.data.edge_index = None\n super(distributeCudaRandomNodeSampler, self).__init__(self,\n batch_size=1, sampler=RandomIndexSampler(self.N, num_parts,\n shuffle), collate_fn=self.__collate__, **kwargs)\n self.deg_out = self.adj.storage.rowcount()\n\n def __getitem__(self, idx):\n return idx\n\n def __cuda_saint_subgraph__(self, node_idx: torch.Tensor) ->Tuple[\n SparseTensor, torch.Tensor]:\n rows = []\n cols = []\n edge_indices = []\n ranks = self.node2rank(node_idx)\n local_nodes = None\n futures = []\n adj_row, adj_col, adj_value = self.adj.coo()\n adj_rowptr = self.adj.storage.rowptr()\n cpu = torch.device('cpu')\n for i in range(self.comm.world_size):\n mask = torch.eq(ranks, i)\n part_nodes = torch.masked_select(node_idx, mask)\n if part_nodes.size(0) >= 1:\n if i == self.comm.rank:\n local_nodes = part_nodes\n futures.append((torch.LongTensor([]), torch.LongTensor(\n []), 
torch.LongTensor([])))\n else:\n futures.append(rpc.rpc_async(f'worker{i}',\n subgraph_nodes, args=(part_nodes, 1), kwargs=None,\n timeout=-1.0))\n else:\n futures.append((torch.LongTensor([]), torch.LongTensor([]),\n torch.LongTensor([])))\n if local_nodes is not None:\n nodes = self.global2local(local_nodes)\n nodes = nodes.to(self.cuda_device)\n deg = torch.index_select(self.deg_out, 0, nodes)\n row, col, edge_index = qv.saint_subgraph(nodes, adj_rowptr,\n adj_row, adj_col, deg)\n row = row.to(cpu)\n col = col.to(cpu)\n edge_index = edge_index.to(cpu)\n futures[self.comm.rank] = row, col, edge_index\n for i in range(len(futures)):\n if not isinstance(futures[i], tuple):\n futures[i] = futures[i].wait()\n row, col, edge_index = futures[i]\n rows.append(row)\n cols.append(col)\n edge_indices.append(edge_index)\n ret_row = torch.cat(rows)\n ret_cols = torch.cat(cols)\n ret_edgeindex = torch.cat(edge_indices)\n if adj_value is not None:\n ret_vals = adj_value[ret_edgeindex].to(cpu)\n out = SparseTensor(row=ret_row, rowptr=None, col=ret_cols, value=\n ret_vals, sparse_sizes=(node_idx.size(0), node_idx.size(0)),\n is_sorted=False)\n return out, ret_edgeindex\n\n def __collate__(self, node_idx):\n node_idx = node_idx[0]\n data = self.data.__class__()\n data.num_nodes = node_idx.size(0)\n node_idx = node_idx.unique()\n adj, _ = self.__cuda_saint_subgraph__(node_idx)\n row, col, edge_idx = adj.coo()\n data.edge_index = torch.stack([row, col], dim=0)\n data.node_idx = node_idx\n data.train_mask = self.data.train_mask[node_idx]\n for key, item in self.data:\n if isinstance(item, Tensor) and item.size(0) == self.N:\n data[key] = item[node_idx]\n elif isinstance(item, Tensor) and item.size(0) == self.E:\n data[key] = item[edge_idx]\n else:\n data[key] = item\n return data\n",
"step-3": "<mask token>\n\n\ndef subgraph_nodes_n(nodes, i):\n row, col, edge_index = None, None, None\n return row, col, edge_index\n\n\nclass Comm:\n\n def __init__(self, rank, world_size):\n self.rank = rank\n self.world_size = world_size\n\n\n<mask token>\n\n\nclass EdgeIndex(NamedTuple):\n edge_index: Tensor\n e_id: Optional[Tensor]\n size: Tuple[int, int]\n\n def to(self, *args, **kwargs):\n edge_index = self.edge_index.to(*args, **kwargs)\n e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None\n return EdgeIndex(edge_index, e_id, self.size)\n\n\nclass Adj(NamedTuple):\n adj_t: SparseTensor\n e_id: Optional[Tensor]\n size: Tuple[int, int]\n\n def to(self, *args, **kwargs):\n adj_t = self.adj_t.to(*args, **kwargs)\n e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None\n return Adj(adj_t, e_id, self.size)\n\n\nclass RandomIndexSampler(torch.utils.data.Sampler):\n\n def __init__(self, num_nodes: int, num_parts: int, shuffle: bool=False):\n self.N = num_nodes\n self.num_parts = num_parts\n self.shuffle = shuffle\n self.n_ids = self.get_node_indices()\n\n def get_node_indices(self):\n n_id = torch.randint(self.num_parts, (self.N,), dtype=torch.long)\n n_ids = [(n_id == i).nonzero(as_tuple=False).view(-1) for i in\n range(self.num_parts)]\n return n_ids\n\n def __iter__(self):\n if self.shuffle:\n self.n_ids = self.get_node_indices()\n return iter(self.n_ids)\n\n def __len__(self):\n return self.num_parts\n\n\nclass distributeCudaRandomNodeSampler(torch.utils.data.DataLoader):\n \"\"\"A data loader that randomly samples nodes within a graph and returns\n their induced subgraph.\n\n .. note::\n\n For an example of using :obj:`RandomNodeSampler`, see\n `examples/ogbn_proteins_deepgcn.py\n <https://github.com/rusty1s/pytorch_geometric/blob/master/examples/\n ogbn_proteins_deepgcn.py>`_.\n\n Args:\n data (torch_geometric.data.Data): The graph data object.\n num_parts (int): The number of partitions.\n shuffle (bool, optional): If set to :obj:`True`, the data is reshuffled\n at every epoch (default: :obj:`False`).\n **kwargs (optional): Additional arguments of\n :class:`torch.utils.data.DataLoader`, such as :obj:`num_workers`.\n \"\"\"\n\n def __init__(self, comm, graph, feature_func, device, num_parts: int,\n shuffle: bool=False, **kwargs):\n self.comm = comm\n data, local2global, global2local, node2rank = graph\n self.local2global = local2global\n self.global2local = global2local\n self.node2rank = node2rank\n self.node_feature = feature_func\n self.cuda_device = torch.device('cuda:' + str(device))\n assert data.edge_index is not None\n self.N = N = data.num_nodes\n self.E = data.num_edges\n self.adj = SparseTensor(row=data.edge_index[0], col=data.edge_index\n [1], value=torch.arange(self.E, device=data.edge_index.device),\n sparse_sizes=(N, N)).to(self.cuda_device)\n self.data = copy.copy(data)\n self.data.edge_index = None\n super(distributeCudaRandomNodeSampler, self).__init__(self,\n batch_size=1, sampler=RandomIndexSampler(self.N, num_parts,\n shuffle), collate_fn=self.__collate__, **kwargs)\n self.deg_out = self.adj.storage.rowcount()\n\n def __getitem__(self, idx):\n return idx\n\n def __cuda_saint_subgraph__(self, node_idx: torch.Tensor) ->Tuple[\n SparseTensor, torch.Tensor]:\n rows = []\n cols = []\n edge_indices = []\n ranks = self.node2rank(node_idx)\n local_nodes = None\n futures = []\n adj_row, adj_col, adj_value = self.adj.coo()\n adj_rowptr = self.adj.storage.rowptr()\n cpu = torch.device('cpu')\n for i in range(self.comm.world_size):\n 
mask = torch.eq(ranks, i)\n part_nodes = torch.masked_select(node_idx, mask)\n if part_nodes.size(0) >= 1:\n if i == self.comm.rank:\n local_nodes = part_nodes\n futures.append((torch.LongTensor([]), torch.LongTensor(\n []), torch.LongTensor([])))\n else:\n futures.append(rpc.rpc_async(f'worker{i}',\n subgraph_nodes, args=(part_nodes, 1), kwargs=None,\n timeout=-1.0))\n else:\n futures.append((torch.LongTensor([]), torch.LongTensor([]),\n torch.LongTensor([])))\n if local_nodes is not None:\n nodes = self.global2local(local_nodes)\n nodes = nodes.to(self.cuda_device)\n deg = torch.index_select(self.deg_out, 0, nodes)\n row, col, edge_index = qv.saint_subgraph(nodes, adj_rowptr,\n adj_row, adj_col, deg)\n row = row.to(cpu)\n col = col.to(cpu)\n edge_index = edge_index.to(cpu)\n futures[self.comm.rank] = row, col, edge_index\n for i in range(len(futures)):\n if not isinstance(futures[i], tuple):\n futures[i] = futures[i].wait()\n row, col, edge_index = futures[i]\n rows.append(row)\n cols.append(col)\n edge_indices.append(edge_index)\n ret_row = torch.cat(rows)\n ret_cols = torch.cat(cols)\n ret_edgeindex = torch.cat(edge_indices)\n if adj_value is not None:\n ret_vals = adj_value[ret_edgeindex].to(cpu)\n out = SparseTensor(row=ret_row, rowptr=None, col=ret_cols, value=\n ret_vals, sparse_sizes=(node_idx.size(0), node_idx.size(0)),\n is_sorted=False)\n return out, ret_edgeindex\n\n def __collate__(self, node_idx):\n node_idx = node_idx[0]\n data = self.data.__class__()\n data.num_nodes = node_idx.size(0)\n node_idx = node_idx.unique()\n adj, _ = self.__cuda_saint_subgraph__(node_idx)\n row, col, edge_idx = adj.coo()\n data.edge_index = torch.stack([row, col], dim=0)\n data.node_idx = node_idx\n data.train_mask = self.data.train_mask[node_idx]\n for key, item in self.data:\n if isinstance(item, Tensor) and item.size(0) == self.N:\n data[key] = item[node_idx]\n elif isinstance(item, Tensor) and item.size(0) == self.E:\n data[key] = item[edge_idx]\n else:\n data[key] = item\n return data\n",
"step-4": "import copy\nfrom typing import List, Optional, Tuple, NamedTuple, Union, Callable\nimport torch\nfrom torch import Tensor\nfrom torch_sparse import SparseTensor\nimport time\nimport torch_quiver as qv\nfrom torch.distributed import rpc\n\n\ndef subgraph_nodes_n(nodes, i):\n row, col, edge_index = None, None, None\n return row, col, edge_index\n\n\nclass Comm:\n\n def __init__(self, rank, world_size):\n self.rank = rank\n self.world_size = world_size\n\n\nsubgraph_nodes = subgraph_nodes_n\n\n\nclass EdgeIndex(NamedTuple):\n edge_index: Tensor\n e_id: Optional[Tensor]\n size: Tuple[int, int]\n\n def to(self, *args, **kwargs):\n edge_index = self.edge_index.to(*args, **kwargs)\n e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None\n return EdgeIndex(edge_index, e_id, self.size)\n\n\nclass Adj(NamedTuple):\n adj_t: SparseTensor\n e_id: Optional[Tensor]\n size: Tuple[int, int]\n\n def to(self, *args, **kwargs):\n adj_t = self.adj_t.to(*args, **kwargs)\n e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None\n return Adj(adj_t, e_id, self.size)\n\n\nclass RandomIndexSampler(torch.utils.data.Sampler):\n\n def __init__(self, num_nodes: int, num_parts: int, shuffle: bool=False):\n self.N = num_nodes\n self.num_parts = num_parts\n self.shuffle = shuffle\n self.n_ids = self.get_node_indices()\n\n def get_node_indices(self):\n n_id = torch.randint(self.num_parts, (self.N,), dtype=torch.long)\n n_ids = [(n_id == i).nonzero(as_tuple=False).view(-1) for i in\n range(self.num_parts)]\n return n_ids\n\n def __iter__(self):\n if self.shuffle:\n self.n_ids = self.get_node_indices()\n return iter(self.n_ids)\n\n def __len__(self):\n return self.num_parts\n\n\nclass distributeCudaRandomNodeSampler(torch.utils.data.DataLoader):\n \"\"\"A data loader that randomly samples nodes within a graph and returns\n their induced subgraph.\n\n .. 
note::\n\n For an example of using :obj:`RandomNodeSampler`, see\n `examples/ogbn_proteins_deepgcn.py\n <https://github.com/rusty1s/pytorch_geometric/blob/master/examples/\n ogbn_proteins_deepgcn.py>`_.\n\n Args:\n data (torch_geometric.data.Data): The graph data object.\n num_parts (int): The number of partitions.\n shuffle (bool, optional): If set to :obj:`True`, the data is reshuffled\n at every epoch (default: :obj:`False`).\n **kwargs (optional): Additional arguments of\n :class:`torch.utils.data.DataLoader`, such as :obj:`num_workers`.\n \"\"\"\n\n def __init__(self, comm, graph, feature_func, device, num_parts: int,\n shuffle: bool=False, **kwargs):\n self.comm = comm\n data, local2global, global2local, node2rank = graph\n self.local2global = local2global\n self.global2local = global2local\n self.node2rank = node2rank\n self.node_feature = feature_func\n self.cuda_device = torch.device('cuda:' + str(device))\n assert data.edge_index is not None\n self.N = N = data.num_nodes\n self.E = data.num_edges\n self.adj = SparseTensor(row=data.edge_index[0], col=data.edge_index\n [1], value=torch.arange(self.E, device=data.edge_index.device),\n sparse_sizes=(N, N)).to(self.cuda_device)\n self.data = copy.copy(data)\n self.data.edge_index = None\n super(distributeCudaRandomNodeSampler, self).__init__(self,\n batch_size=1, sampler=RandomIndexSampler(self.N, num_parts,\n shuffle), collate_fn=self.__collate__, **kwargs)\n self.deg_out = self.adj.storage.rowcount()\n\n def __getitem__(self, idx):\n return idx\n\n def __cuda_saint_subgraph__(self, node_idx: torch.Tensor) ->Tuple[\n SparseTensor, torch.Tensor]:\n rows = []\n cols = []\n edge_indices = []\n ranks = self.node2rank(node_idx)\n local_nodes = None\n futures = []\n adj_row, adj_col, adj_value = self.adj.coo()\n adj_rowptr = self.adj.storage.rowptr()\n cpu = torch.device('cpu')\n for i in range(self.comm.world_size):\n mask = torch.eq(ranks, i)\n part_nodes = torch.masked_select(node_idx, mask)\n if part_nodes.size(0) >= 1:\n if i == self.comm.rank:\n local_nodes = part_nodes\n futures.append((torch.LongTensor([]), torch.LongTensor(\n []), torch.LongTensor([])))\n else:\n futures.append(rpc.rpc_async(f'worker{i}',\n subgraph_nodes, args=(part_nodes, 1), kwargs=None,\n timeout=-1.0))\n else:\n futures.append((torch.LongTensor([]), torch.LongTensor([]),\n torch.LongTensor([])))\n if local_nodes is not None:\n nodes = self.global2local(local_nodes)\n nodes = nodes.to(self.cuda_device)\n deg = torch.index_select(self.deg_out, 0, nodes)\n row, col, edge_index = qv.saint_subgraph(nodes, adj_rowptr,\n adj_row, adj_col, deg)\n row = row.to(cpu)\n col = col.to(cpu)\n edge_index = edge_index.to(cpu)\n futures[self.comm.rank] = row, col, edge_index\n for i in range(len(futures)):\n if not isinstance(futures[i], tuple):\n futures[i] = futures[i].wait()\n row, col, edge_index = futures[i]\n rows.append(row)\n cols.append(col)\n edge_indices.append(edge_index)\n ret_row = torch.cat(rows)\n ret_cols = torch.cat(cols)\n ret_edgeindex = torch.cat(edge_indices)\n if adj_value is not None:\n ret_vals = adj_value[ret_edgeindex].to(cpu)\n out = SparseTensor(row=ret_row, rowptr=None, col=ret_cols, value=\n ret_vals, sparse_sizes=(node_idx.size(0), node_idx.size(0)),\n is_sorted=False)\n return out, ret_edgeindex\n\n def __collate__(self, node_idx):\n node_idx = node_idx[0]\n data = self.data.__class__()\n data.num_nodes = node_idx.size(0)\n node_idx = node_idx.unique()\n adj, _ = self.__cuda_saint_subgraph__(node_idx)\n row, col, edge_idx = adj.coo()\n 
data.edge_index = torch.stack([row, col], dim=0)\n data.node_idx = node_idx\n data.train_mask = self.data.train_mask[node_idx]\n for key, item in self.data:\n if isinstance(item, Tensor) and item.size(0) == self.N:\n data[key] = item[node_idx]\n elif isinstance(item, Tensor) and item.size(0) == self.E:\n data[key] = item[edge_idx]\n else:\n data[key] = item\n return data\n",
"step-5": "import copy\nfrom typing import List, Optional, Tuple, NamedTuple, Union, Callable\n\nimport torch\nfrom torch import Tensor\nfrom torch_sparse import SparseTensor\nimport time\nimport torch_quiver as qv\nfrom torch.distributed import rpc\n\ndef subgraph_nodes_n(nodes, i):\n row, col, edge_index = None, None, None\n return row, col, edge_index\n\nclass Comm:\n def __init__(self, rank, world_size):\n self.rank = rank\n self.world_size = world_size\n\nsubgraph_nodes = subgraph_nodes_n\n\nclass EdgeIndex(NamedTuple):\n edge_index: Tensor\n e_id: Optional[Tensor]\n size: Tuple[int, int]\n\n def to(self, *args, **kwargs):\n edge_index = self.edge_index.to(*args, **kwargs)\n e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None\n return EdgeIndex(edge_index, e_id, self.size)\n\n\nclass Adj(NamedTuple):\n adj_t: SparseTensor\n e_id: Optional[Tensor]\n size: Tuple[int, int]\n\n def to(self, *args, **kwargs):\n adj_t = self.adj_t.to(*args, **kwargs)\n e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None\n return Adj(adj_t, e_id, self.size)\n\nclass RandomIndexSampler(torch.utils.data.Sampler):\n def __init__(self, num_nodes: int, num_parts: int, shuffle: bool = False):\n self.N = num_nodes\n self.num_parts = num_parts\n self.shuffle = shuffle\n self.n_ids = self.get_node_indices()\n\n def get_node_indices(self):\n n_id = torch.randint(self.num_parts, (self.N, ), dtype=torch.long)\n n_ids = [(n_id == i).nonzero(as_tuple=False).view(-1)\n for i in range(self.num_parts)]\n return n_ids\n\n def __iter__(self):\n if self.shuffle:\n self.n_ids = self.get_node_indices()\n return iter(self.n_ids)\n\n def __len__(self):\n return self.num_parts\n\n\nclass distributeCudaRandomNodeSampler(torch.utils.data.DataLoader):\n r\"\"\"A data loader that randomly samples nodes within a graph and returns\n their induced subgraph.\n\n .. 
note::\n\n For an example of using :obj:`RandomNodeSampler`, see\n `examples/ogbn_proteins_deepgcn.py\n <https://github.com/rusty1s/pytorch_geometric/blob/master/examples/\n ogbn_proteins_deepgcn.py>`_.\n\n Args:\n data (torch_geometric.data.Data): The graph data object.\n num_parts (int): The number of partitions.\n shuffle (bool, optional): If set to :obj:`True`, the data is reshuffled\n at every epoch (default: :obj:`False`).\n **kwargs (optional): Additional arguments of\n :class:`torch.utils.data.DataLoader`, such as :obj:`num_workers`.\n \"\"\"\n def __init__(self, comm,\n graph,\n feature_func,\n device,\n num_parts: int,\n shuffle: bool = False,\n **kwargs):\n self.comm = comm\n data, local2global, global2local, node2rank = graph\n self.local2global = local2global\n self.global2local = global2local\n self.node2rank = node2rank\n self.node_feature = feature_func\n self.cuda_device = torch.device('cuda:' + str(device))\n\n assert data.edge_index is not None\n\n self.N = N = data.num_nodes\n self.E = data.num_edges\n self.adj = SparseTensor(\n row=data.edge_index[0], col=data.edge_index[1],\n value=torch.arange(self.E, device=data.edge_index.device),\n sparse_sizes=(N, N)).to(self.cuda_device)\n self.data = copy.copy(data)\n self.data.edge_index = None\n\n super(distributeCudaRandomNodeSampler, self).__init__(\n self, batch_size=1,\n sampler=RandomIndexSampler(self.N, num_parts, shuffle),\n collate_fn=self.__collate__, **kwargs)\n self.deg_out = self.adj.storage.rowcount()\n\n def __getitem__(self, idx):\n return idx\n\n def __cuda_saint_subgraph__(\n self, node_idx: torch.Tensor) -> Tuple[SparseTensor, torch.Tensor]:\n rows = []\n cols = []\n edge_indices = []\n # splite node idx\n ranks = self.node2rank(node_idx)\n local_nodes = None\n futures = []\n adj_row, adj_col, adj_value = self.adj.coo()\n adj_rowptr = self.adj.storage.rowptr()\n cpu = torch.device('cpu')\n\n for i in range(self.comm.world_size):\n # for every device check how many nodes on the device\n mask = torch.eq(ranks, i)\n part_nodes = torch.masked_select(node_idx, mask)\n # nodes as the the current, pointer ordered inputs to accumulate the partial nodes\n if part_nodes.size(0) >= 1:\n # if current server then local\n if i == self.comm.rank:\n local_nodes = part_nodes\n futures.append((torch.LongTensor([]), torch.LongTensor([]), torch.LongTensor([])))\n # remote server\n else:\n futures.append(\n rpc.rpc_async(f\"worker{i}\",\n subgraph_nodes,\n args=(part_nodes, 1),\n kwargs=None,\n timeout=-1.0))\n\n else:\n futures.append((torch.LongTensor([]), torch.LongTensor([]), torch.LongTensor([])))\n # local server has nodes\n if local_nodes is not None:\n nodes = self.global2local(local_nodes)\n nodes = nodes.to(self.cuda_device)\n\n deg = torch.index_select(self.deg_out, 0, nodes)\n row, col, edge_index = qv.saint_subgraph(nodes, adj_rowptr, adj_row, adj_col, deg)\n\n row = row.to(cpu)\n col = col.to(cpu)\n edge_index = edge_index.to(cpu)\n\n futures[self.comm.rank] = row, col, edge_index\n\n for i in range(len(futures)):\n if not isinstance(futures[i], tuple):\n futures[i] = futures[i].wait()\n row, col, edge_index = futures[i]\n rows.append(row)\n cols.append(col)\n edge_indices.append(edge_index)\n\n ret_row = torch.cat(rows)\n ret_cols = torch.cat(cols)\n ret_edgeindex = torch.cat(edge_indices)\n\n if adj_value is not None:\n ret_vals = adj_value[ret_edgeindex].to(cpu)\n out = SparseTensor(row = ret_row,\n rowptr = None,\n col= ret_cols,\n value = ret_vals,\n sparse_sizes=(node_idx.size(0), node_idx.size(0)),\n 
is_sorted=False)\n return out, ret_edgeindex\n\n def __collate__(self, node_idx):\n node_idx = node_idx[0]\n data = self.data.__class__()\n data.num_nodes = node_idx.size(0)\n node_idx = node_idx.unique()\n adj, _ = self.__cuda_saint_subgraph__(node_idx)\n row, col, edge_idx = adj.coo()\n data.edge_index = torch.stack([row, col], dim=0)\n data.node_idx = node_idx\n data.train_mask = self.data.train_mask[node_idx]\n for key, item in self.data:\n if isinstance(item, Tensor) and item.size(0) == self.N:\n data[key] = item[node_idx]\n elif isinstance(item, Tensor) and item.size(0) == self.E:\n data[key] = item[edge_idx]\n else:\n data[key] = item\n return data",
"step-ids": [
12,
15,
18,
20,
21
]
}
|
[
12,
15,
18,
20,
21
] |
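The distributed sampler record above depends on `torch_quiver`, `torch_sparse`, and an RPC setup, so it is hard to exercise end to end; the node-partitioning step from its `RandomIndexSampler`, however, can be checked in isolation with plain `torch`. A sketch, assuming only that PyTorch is installed:

import torch

num_nodes, num_parts = 10, 3
# Same partitioning logic as RandomIndexSampler.get_node_indices() above.
n_id = torch.randint(num_parts, (num_nodes,), dtype=torch.long)
parts = [(n_id == i).nonzero(as_tuple=False).view(-1) for i in range(num_parts)]

assert sum(p.numel() for p in parts) == num_nodes  # every node lands in exactly one part
print([p.tolist() for p in parts])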
# -*- coding: utf-8 -*-
'''
* EAFS
* Copyright (C) 2009-2011 Adam Etienne <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation version 3.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import math,uuid,sys,os,time,operator,xmlrpclib,random,argparse
from eafslib import EAFSChunkServerRpc
class EAFSClient:
def __init__(self, master_host):
self.master = xmlrpclib.ServerProxy(master_host)
self.chunkservers = {}
def write(self, filename, data):
if self.exists(filename):
self.delete(filename)
num_chunks = self.num_chunks(len(data))
attributes = {"mode":"file", "atime":"", "ctime":"", "mtime":"", "attrs":""}
chunkuuids = self.master.alloc(filename, num_chunks, attributes)
self.write_chunks(chunkuuids, data)
def update_chunkservers(self):
chunkservers = self.master.get_chunkservers()
#print "CHUNKSERVERS[RAW]: ", chunkservers
for chunkserver in chunkservers:
#print chunkserver
if chunkserver['uuid'] not in self.chunkservers:
self.chunkservers[chunkserver['uuid']] = EAFSChunkServerRpc( chunkserver['uuid'], chunkserver['address'] )
def write_chunks(self, chunkuuids, data):
chunks = [ data[x:x+self.master.get_chunksize()] \
for x in range(0, len(data), self.master.get_chunksize()) ]
#chunkservers = self.master.get_chunkservers()
self.update_chunkservers()
#print "CHUNKSERVERS: ", self.chunkservers
for i in range(0, len(chunkuuids)): # write to each chunkserver
chunkuuid = chunkuuids[i]
chunklocs = self.master.get_chunklocs(chunkuuid)
for chunkloc in chunklocs:
#print "chunkloc: ", chunkloc
self.chunkservers[chunkloc].rpc.write(chunkuuid, chunks[i])
def num_chunks(self, size):
return (size // self.master.get_chunksize()) \
+ (1 if size % self.master.get_chunksize() > 0 else 0)
def write_append(self, filename, data):
if not self.exists(filename):
raise Exception("append error, file does not exist: " + filename)
num_append_chunks = self.num_chunks(len(data))
append_chunkuuids = self.master.alloc_append(filename, \
num_append_chunks)
self.write_chunks(append_chunkuuids, data)
def exists(self, filename):
return self.master.exists(filename)
def read(self, filename): # get metadata, then read chunks direct
if not self.exists(filename):
raise Exception("read error, file does not exist: " + filename)
chunks = []
chunkuuids = self.master.get_chunkuuids(filename)
#chunkservers = self.master.get_chunkservers()
self.update_chunkservers()
for chunkuuid in chunkuuids:
chunklocs = self.master.get_chunklocs(chunkuuid)
done_chunkserver = []
chunk = None
chunk_read = False
while not (chunk_read or len(done_chunkserver)==len(chunklocs)):
chunkidrnd = random.randint(0, len(chunklocs)-1)
while chunkidrnd not in done_chunkserver and len(done_chunkserver)>0:
chunkidrnd = random.randint(0, len(chunklocs)-1)
chunkloc = chunklocs[chunkidrnd]
print "Select chunkloc %s from %d choices" % (chunkloc, len(chunklocs))
try:
chunk = self.chunkservers[chunkloc].rpc.read(chunkuuid)
chunk_read = True
done_chunkserver.append(chunkidrnd)
except:
print "Chunkserver %d failed" % chunkidrnd
if not chunk_read:
raise Exception("read error, chunkserver unavailable: " + filename)
chunks.append(chunk)
data = reduce(lambda x, y: x + y, chunks) # reassemble in order
return data
def delete(self, filename):
self.master.delete(filename)
def main():
parser = argparse.ArgumentParser(description='EAFS Simple Client')
parser.add_argument('--master', dest='master', default='localhost:6799', help='Master server address')
args = parser.parse_args()
master = 'http://' + args.master
client = EAFSClient(master)
# test write, exist, read
print "\nWriting..."
#try:
if False:
client.write("/usr/python/readme.txt", """
This file tells you all about python that you ever wanted to know.
Not every README is as informative as this one, but we aim to please.
Never yet has there been so much information in so little space.
""")
#except:
# print client.master.dump_metadata()
print "File exists? ", client.exists("/usr/python/readme.txt")
print client.read("/usr/python/readme.txt")
# show structure of the filesystem
print "\nMetadata Dump..."
print client.master.dump_metadata()
if __name__ == "__main__":
main()
"""
# test append, read after append
#print "\nAppending..."
#client.write_append("/usr/python/readme.txt", \
# "I'm a little sentence that just snuck in at the end.\n")
#print client.read("/usr/python/readme.txt")
# test delete
#print "\nDeleting..."
#client.delete("/usr/python/readme.txt")
#print "File exists? ", client.exists("/usr/python/readme.txt")
# test exceptions
#print "\nTesting Exceptions..."
#try:
# client.read("/usr/python/readme.txt")
#except Exception as e:
# print "This exception should be thrown:", e
#try:
# client.write_append("/usr/python/readme.txt", "foo")
#except Exception as e:
# print "This exception should be thrown:", e
"""
|
normal
|
{
"blob_id": "2f5244c6144f5aafce29e5aba32bd7e3fc7ecf5b",
"index": 3632,
"step-1": "# -*- coding: utf-8 -*-\n'''\n * EAFS\n * Copyright (C) 2009-2011 Adam Etienne <[email protected]>\n *\n * This program is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation version 3.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. If not, see <http://www.gnu.org/licenses/>.\n'''\n\nimport math,uuid,sys,os,time,operator,xmlrpclib,random,argparse\nfrom eafslib import EAFSChunkServerRpc\n\n\nclass EAFSClient:\n\tdef __init__(self, master_host):\n\t\tself.master = xmlrpclib.ServerProxy(master_host)\n\t\tself.chunkservers = {}\n\n\tdef write(self, filename, data):\n\t\tif self.exists(filename):\n\t\t\tself.delete(filename)\n\t\tnum_chunks = self.num_chunks(len(data))\n\t\tattributes = {\"mode\":\"file\", \"atime\":\"\", \"ctime\":\"\", \"mtime\":\"\", \"attrs\":\"\"}\n\t\tchunkuuids = self.master.alloc(filename, num_chunks, attributes)\n\t\tself.write_chunks(chunkuuids, data)\n\t\n\tdef update_chunkservers(self):\n\t\tchunkservers = self.master.get_chunkservers()\n\t\t#print \"CHUNKSERVERS[RAW]: \", chunkservers\n\t\tfor chunkserver in chunkservers:\n\t\t\t#print chunkserver\n\t\t\tif chunkserver['uuid'] not in self.chunkservers:\n\t\t\t\tself.chunkservers[chunkserver['uuid']] = EAFSChunkServerRpc( chunkserver['uuid'], chunkserver['address'] )\n\t\t\n\tdef write_chunks(self, chunkuuids, data):\n\t\tchunks = [ data[x:x+self.master.get_chunksize()] \\\n\t\t\tfor x in range(0, len(data), self.master.get_chunksize()) ]\n\t\t#chunkservers = self.master.get_chunkservers()\n\t\tself.update_chunkservers()\n\t\t#print \"CHUNKSERVERS: \", self.chunkservers\n\t\tfor i in range(0, len(chunkuuids)): # write to each chunkserver\n\t\t\tchunkuuid = chunkuuids[i]\n\t\t\tchunklocs = self.master.get_chunklocs(chunkuuid)\n\t\t\tfor chunkloc in chunklocs:\n\t\t\t\t#print \"chunkloc: \", chunkloc\n\t\t\t\tself.chunkservers[chunkloc].rpc.write(chunkuuid, chunks[i])\n\n\tdef num_chunks(self, size):\n\t\treturn (size // self.master.get_chunksize()) \\\n\t\t\t+ (1 if size % self.master.get_chunksize() > 0 else 0)\n\n\tdef write_append(self, filename, data):\n\t\tif not self.exists(filename):\n\t\t\traise Exception(\"append error, file does not exist: \" + filename)\n\t\tnum_append_chunks = self.num_chunks(len(data))\n\t\tappend_chunkuuids = self.master.alloc_append(filename, \\\n\t\t\tnum_append_chunks)\n\t\tself.write_chunks(append_chunkuuids, data) \n\n\tdef exists(self, filename):\n\t\treturn self.master.exists(filename)\n\t\t\n\tdef read(self, filename): # get metadata, then read chunks direct\n\t\tif not self.exists(filename):\n\t\t\traise Exception(\"read error, file does not exist: \" + filename)\n\t\tchunks = []\n\t\tchunkuuids = self.master.get_chunkuuids(filename)\n\t\t#chunkservers = self.master.get_chunkservers()\n\t\tself.update_chunkservers()\n\t\tfor chunkuuid in chunkuuids:\n\t\t\tchunklocs = self.master.get_chunklocs(chunkuuid)\n\t\t\tdone_chunkserver = []\n\t\t\tchunk = None\n\t\t\tchunk_read = False\n\t\t\twhile not (chunk_read or len(done_chunkserver)==len(chunklocs)):\n\t\t\t\tchunkidrnd = random.randint(0, len(chunklocs)-1)\n\t\t\t\twhile chunkidrnd not in done_chunkserver and 
len(done_chunkserver)>0:\n\t\t\t\t\tchunkidrnd = random.randint(0, len(chunklocs)-1)\n\t\t\t\tchunkloc = chunklocs[chunkidrnd]\n\t\t\t\tprint \"Select chunkloc %s from %d choices\" % (chunkloc, len(chunklocs))\n\t\t\t\ttry:\n\t\t\t\t\tchunk = self.chunkservers[chunkloc].rpc.read(chunkuuid)\n\t\t\t\t\tchunk_read = True\n\t\t\t\t\tdone_chunkserver.append(chunkidrnd)\n\t\t\t\texcept:\n\t\t\t\t\tprint \"Chunkserver %d failed\" % chunkidrnd\n\t\t\tif not chunk_read:\n\t\t\t\traise Exception(\"read error, chunkserver unavailable: \" + filename)\n\t\t\tchunks.append(chunk)\n\t\tdata = reduce(lambda x, y: x + y, chunks) # reassemble in order\n\t\treturn data\n\n\tdef delete(self, filename):\n\t\tself.master.delete(filename)\n\ndef main():\n\tparser = argparse.ArgumentParser(description='EAFS Simple Client')\n\tparser.add_argument('--master', dest='master', default='localhost:6799', help='Master server address')\n\targs = parser.parse_args()\n\tmaster = 'http://' + args.master\n\t\n\tclient = EAFSClient(master)\n\t\n\t# test write, exist, read\n\tprint \"\\nWriting...\"\n\t#try:\n\tif False:\n\t\tclient.write(\"/usr/python/readme.txt\", \"\"\"\n\t\tThis file tells you all about python that you ever wanted to know.\n\t\tNot every README is as informative as this one, but we aim to please.\n\t\tNever yet has there been so much information in so little space.\n\t\t\"\"\")\n\t#except:\n\t# print client.master.dump_metadata()\n\tprint \"File exists? \", client.exists(\"/usr/python/readme.txt\")\n\tprint client.read(\"/usr/python/readme.txt\")\n\t# show structure of the filesystem\n\tprint \"\\nMetadata Dump...\" \n\tprint client.master.dump_metadata()\n\nif __name__ == \"__main__\":\n\tmain()\n\n\"\"\"\n\t# test append, read after append\n\t#print \"\\nAppending...\"\n\t#client.write_append(\"/usr/python/readme.txt\", \\\n\t# \"I'm a little sentence that just snuck in at the end.\\n\")\n\t#print client.read(\"/usr/python/readme.txt\")\n\n\t# test delete\n\t#print \"\\nDeleting...\"\n\t#client.delete(\"/usr/python/readme.txt\")\n\t#print \"File exists? \", client.exists(\"/usr/python/readme.txt\")\n\t\n\t# test exceptions\n\t#print \"\\nTesting Exceptions...\"\n\t#try:\n\t# client.read(\"/usr/python/readme.txt\")\n\t#except Exception as e:\n\t# print \"This exception should be thrown:\", e\n\t#try:\n\t# client.write_append(\"/usr/python/readme.txt\", \"foo\")\n\t#except Exception as e:\n\t# print \"This exception should be thrown:\", e\n\"\"\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
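The `num_chunks` method in the EAFS client above is plain ceiling division over the master's chunk size. A small Python 3 restatement (the record itself is Python 2), with 64 as a purely assumed chunk size since the value behind `master.get_chunksize()` is not shown:

def num_chunks(size, chunksize=64):
    # Ceiling division: one extra chunk whenever the size is not an exact multiple.
    return size // chunksize + (1 if size % chunksize > 0 else 0)

print(num_chunks(0))   # 0
print(num_chunks(64))  # 1
print(num_chunks(65))  # 2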
l = input().split("+")
l.sort()
print('+'.join(l))
|
normal
|
{
"blob_id": "30d891c18f3635b7419fa0d0539b2665ad60b22c",
"index": 4748,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nl.sort()\nprint('+'.join(l))\n",
"step-3": "l = input().split('+')\nl.sort()\nprint('+'.join(l))\n",
"step-4": "l = input().split(\"+\")\r\r\nl.sort()\r\r\nprint('+'.join(l))\r\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
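The one-liner record above sorts the summands as strings, which is only safe while every term is a single digit. A non-interactive check with a hypothetical input value:

s = "3+1+2"  # stands in for the interactive input()
parts = sorted(s.split("+"))
print("+".join(parts))  # 1+2+3
# With multi-digit terms a string sort would misorder, e.g. "10" sorts before "2".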
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(n):
a[i] = int(a[i])
for i in range(n - 1):
for j in range(n - i - 1):
if a[j] > a[j + 1]:
a[j], a[j + 1] = a[j + 1], a[j]
print('Sortes array :', a)
<|reserved_special_token_1|>
a = input('Please enter the elements with spaces between them:').split()
n = len(a)
for i in range(n):
a[i] = int(a[i])
for i in range(n - 1):
for j in range(n - i - 1):
if a[j] > a[j + 1]:
a[j], a[j + 1] = a[j + 1], a[j]
print('Sortes array :', a)
<|reserved_special_token_1|>
a=input("Please enter the elements with spaces between them:").split()
n=len(a)
for i in range(n):
a[i]=int(a[i])
for i in range(n-1):
for j in range(n-i-1):
if a[j]>a[j+1]:
a[j],a[j+1]=a[j+1],a[j]
print("Sortes array :",a)
|
flexible
|
{
"blob_id": "5c2a6802e89314c25f0264bbe2bc7ed2689a255a",
"index": 782,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n a[i] = int(a[i])\nfor i in range(n - 1):\n for j in range(n - i - 1):\n if a[j] > a[j + 1]:\n a[j], a[j + 1] = a[j + 1], a[j]\nprint('Sortes array :', a)\n",
"step-3": "a = input('Please enter the elements with spaces between them:').split()\nn = len(a)\nfor i in range(n):\n a[i] = int(a[i])\nfor i in range(n - 1):\n for j in range(n - i - 1):\n if a[j] > a[j + 1]:\n a[j], a[j + 1] = a[j + 1], a[j]\nprint('Sortes array :', a)\n",
"step-4": "a=input(\"Please enter the elements with spaces between them:\").split()\nn=len(a)\nfor i in range(n):\n a[i]=int(a[i])\nfor i in range(n-1):\n for j in range(n-i-1):\n if a[j]>a[j+1]:\n a[j],a[j+1]=a[j+1],a[j]\nprint(\"Sortes array :\",a)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
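A non-interactive restatement of the bubble sort recorded above, for a quick check without `input()`; the pass structure is unchanged.

def bubble_sort(a):
    n = len(a)
    for i in range(n - 1):          # after pass i, the i largest values are in place
        for j in range(n - i - 1):
            if a[j] > a[j + 1]:
                a[j], a[j + 1] = a[j + 1], a[j]
    return a

print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]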
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
auth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7',
'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')
<|reserved_special_token_0|>
auth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7',
'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')
<|reserved_special_token_0|>
for tweet in tweepy.Cursor(api.search, q='#ootd', count=100,
include_entities=True, lang='en', since='2018-11-01').items():
if 'media' in tweet.entities:
for image in tweet.entities['media']:
favs = tweet.favorite_count
if favs > 30:
csvWriter.writerow([favs, image['media_url'], tweet.created_at]
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
auth = OAuthHandler('WNUpykrIjiGF0NKoV7qk7uiNj',
'Nhe0GjOkbaQKbPMLTqcAYQnqMnz3Edpdup28h2R2KqRLa6iBDN')
auth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7',
'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')
auth = tweepy.OAuthHandler('WNUpykrIjiGF0NKoV7qk7uiNj',
'Nhe0GjOkbaQKbPMLTqcAYQnqMnz3Edpdup28h2R2KqRLa6iBDN')
auth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7',
'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')
api = tweepy.API(auth, wait_on_rate_limit=True)
csvFile = open('data.csv', 'a')
csvWriter = csv.writer(csvFile)
for tweet in tweepy.Cursor(api.search, q='#ootd', count=100,
include_entities=True, lang='en', since='2018-11-01').items():
if 'media' in tweet.entities:
for image in tweet.entities['media']:
favs = tweet.favorite_count
if favs > 30:
csvWriter.writerow([favs, image['media_url'], tweet.created_at]
)
<|reserved_special_token_1|>
import csv
import tweepy
import pandas as pd
from tweepy.auth import OAuthHandler
auth = OAuthHandler('WNUpykrIjiGF0NKoV7qk7uiNj',
'Nhe0GjOkbaQKbPMLTqcAYQnqMnz3Edpdup28h2R2KqRLa6iBDN')
auth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7',
'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')
auth = tweepy.OAuthHandler('WNUpykrIjiGF0NKoV7qk7uiNj',
'Nhe0GjOkbaQKbPMLTqcAYQnqMnz3Edpdup28h2R2KqRLa6iBDN')
auth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7',
'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')
api = tweepy.API(auth, wait_on_rate_limit=True)
csvFile = open('data.csv', 'a')
csvWriter = csv.writer(csvFile)
for tweet in tweepy.Cursor(api.search, q='#ootd', count=100,
include_entities=True, lang='en', since='2018-11-01').items():
if 'media' in tweet.entities:
for image in tweet.entities['media']:
favs = tweet.favorite_count
if favs > 30:
csvWriter.writerow([favs, image['media_url'], tweet.created_at]
)
<|reserved_special_token_1|>
import csv
import tweepy
import pandas as pd
####input your credentials here
from tweepy.auth import OAuthHandler
auth = OAuthHandler('WNUpykrIjiGF0NKoV7qk7uiNj', 'Nhe0GjOkbaQKbPMLTqcAYQnqMnz3Edpdup28h2R2KqRLa6iBDN')
auth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7', 'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')
auth = tweepy.OAuthHandler('WNUpykrIjiGF0NKoV7qk7uiNj', 'Nhe0GjOkbaQKbPMLTqcAYQnqMnz3Edpdup28h2R2KqRLa6iBDN')
auth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7', 'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')
api = tweepy.API(auth,wait_on_rate_limit=True)
csvFile = open('data.csv', 'a')
csvWriter = csv.writer(csvFile)
for tweet in tweepy.Cursor(api.search,q="#ootd",count=100,
include_entities=True,
lang="en",
since="2018-11-01").items():
if 'media' in tweet.entities:
for image in tweet.entities['media']:
favs = tweet.favorite_count
if favs > 30:
csvWriter.writerow([favs, image['media_url'], tweet.created_at])
|
flexible
|
{
"blob_id": "b68cc09347584dfc613b2e38d036b124c9af7952",
"index": 1904,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nauth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7',\n 'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')\n<mask token>\nauth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7',\n 'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')\n<mask token>\nfor tweet in tweepy.Cursor(api.search, q='#ootd', count=100,\n include_entities=True, lang='en', since='2018-11-01').items():\n if 'media' in tweet.entities:\n for image in tweet.entities['media']:\n favs = tweet.favorite_count\n if favs > 30:\n csvWriter.writerow([favs, image['media_url'], tweet.created_at]\n )\n",
"step-3": "<mask token>\nauth = OAuthHandler('WNUpykrIjiGF0NKoV7qk7uiNj',\n 'Nhe0GjOkbaQKbPMLTqcAYQnqMnz3Edpdup28h2R2KqRLa6iBDN')\nauth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7',\n 'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')\nauth = tweepy.OAuthHandler('WNUpykrIjiGF0NKoV7qk7uiNj',\n 'Nhe0GjOkbaQKbPMLTqcAYQnqMnz3Edpdup28h2R2KqRLa6iBDN')\nauth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7',\n 'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')\napi = tweepy.API(auth, wait_on_rate_limit=True)\ncsvFile = open('data.csv', 'a')\ncsvWriter = csv.writer(csvFile)\nfor tweet in tweepy.Cursor(api.search, q='#ootd', count=100,\n include_entities=True, lang='en', since='2018-11-01').items():\n if 'media' in tweet.entities:\n for image in tweet.entities['media']:\n favs = tweet.favorite_count\n if favs > 30:\n csvWriter.writerow([favs, image['media_url'], tweet.created_at]\n )\n",
"step-4": "import csv\nimport tweepy\nimport pandas as pd\nfrom tweepy.auth import OAuthHandler\nauth = OAuthHandler('WNUpykrIjiGF0NKoV7qk7uiNj',\n 'Nhe0GjOkbaQKbPMLTqcAYQnqMnz3Edpdup28h2R2KqRLa6iBDN')\nauth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7',\n 'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')\nauth = tweepy.OAuthHandler('WNUpykrIjiGF0NKoV7qk7uiNj',\n 'Nhe0GjOkbaQKbPMLTqcAYQnqMnz3Edpdup28h2R2KqRLa6iBDN')\nauth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7',\n 'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')\napi = tweepy.API(auth, wait_on_rate_limit=True)\ncsvFile = open('data.csv', 'a')\ncsvWriter = csv.writer(csvFile)\nfor tweet in tweepy.Cursor(api.search, q='#ootd', count=100,\n include_entities=True, lang='en', since='2018-11-01').items():\n if 'media' in tweet.entities:\n for image in tweet.entities['media']:\n favs = tweet.favorite_count\n if favs > 30:\n csvWriter.writerow([favs, image['media_url'], tweet.created_at]\n )\n",
"step-5": "import csv\nimport tweepy\nimport pandas as pd\n####input your credentials here\nfrom tweepy.auth import OAuthHandler\nauth = OAuthHandler('WNUpykrIjiGF0NKoV7qk7uiNj', 'Nhe0GjOkbaQKbPMLTqcAYQnqMnz3Edpdup28h2R2KqRLa6iBDN')\nauth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7', 'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')\n\nauth = tweepy.OAuthHandler('WNUpykrIjiGF0NKoV7qk7uiNj', 'Nhe0GjOkbaQKbPMLTqcAYQnqMnz3Edpdup28h2R2KqRLa6iBDN')\nauth.set_access_token('956917059287375875-EThit80MxgQPTJlh7ZObqyHsoV8Q2D7', 'eLv893meGppqfX3xOr8SJ93kpsbZpoOiRsVM3XTgJryZM')\napi = tweepy.API(auth,wait_on_rate_limit=True)\ncsvFile = open('data.csv', 'a')\ncsvWriter = csv.writer(csvFile)\n\nfor tweet in tweepy.Cursor(api.search,q=\"#ootd\",count=100,\n include_entities=True,\n lang=\"en\",\n since=\"2018-11-01\").items():\n\n if 'media' in tweet.entities:\n for image in tweet.entities['media']:\n favs = tweet.favorite_count\n if favs > 30:\n csvWriter.writerow([favs, image['media_url'], tweet.created_at])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import List
import scrapy
from cssselect import Selector
class RwidSpider(scrapy.Spider):
name = 'rwid'
allowed_domains = ['0.0.0.0']
    # REQUEST LOGIN FROM THE URLS
start_urls = ['http://0.0.0.0:9999/']
    # LOG IN HERE
def parse(self, response):
        # what's the difference between yield & return?
# yield {"title": response.css("title::text").get()}
        # check in inspect element whether login is required
data = {
"username": "user",
"password": "user12345"
}
        # check what FormRequest needs
return scrapy.FormRequest(
url="http://0.0.0.0:9999/login",
formdata=data,
            callback=self.after_login # to extract the data
)
def after_login(self, response):
"""
        There are 2 tasks here:
        1. Grab all the product data on the results page -> will go on to the detail page (parsing detail)
        2. Grab all the next links -> will go back to self.after_login
:param response:
:return:
"""
# get detail product
detail_products: List[Selector] = response.css(".card .card-title a")
for detail in detail_products:
            href = detail.attrib.get("href") # to get the urls
            yield response.follow(href, callback=self.parse_detail) # put this url into the scrapy queue
yield {"title": response.css("title::text").get()}
def parse_detail(self, response):
yield {"title": response.css("title::text").get()}
|
normal
|
{
"blob_id": "2185d332f7cd4cbf17d6b72a19297d156c2182a1",
"index": 2233,
"step-1": "<mask token>\n\n\nclass RwidSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n data = {'username': 'user', 'password': 'user12345'}\n return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata\n =data, callback=self.after_login)\n <mask token>\n\n def parse_detail(self, response):\n yield {'title': response.css('title::text').get()}\n",
"step-2": "<mask token>\n\n\nclass RwidSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n data = {'username': 'user', 'password': 'user12345'}\n return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata\n =data, callback=self.after_login)\n\n def after_login(self, response):\n \"\"\"\n Ada 2 Task disini :\n 1. Ambil semua data barang yang ada dihalaman hasil -> akan menuju detail (parsing detail)\n 2. Ambil semua link next -> akan balik ke self.after_login\n\n :param response:\n :return:\n \"\"\"\n detail_products: List[Selector] = response.css('.card .card-title a')\n for detail in detail_products:\n href = detail.attrib.get('href')\n yield response.follow(href, callback=self.parse_detail)\n yield {'title': response.css('title::text').get()}\n\n def parse_detail(self, response):\n yield {'title': response.css('title::text').get()}\n",
"step-3": "<mask token>\n\n\nclass RwidSpider(scrapy.Spider):\n name = 'rwid'\n allowed_domains = ['0.0.0.0']\n start_urls = ['http://0.0.0.0:9999/']\n\n def parse(self, response):\n data = {'username': 'user', 'password': 'user12345'}\n return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata\n =data, callback=self.after_login)\n\n def after_login(self, response):\n \"\"\"\n Ada 2 Task disini :\n 1. Ambil semua data barang yang ada dihalaman hasil -> akan menuju detail (parsing detail)\n 2. Ambil semua link next -> akan balik ke self.after_login\n\n :param response:\n :return:\n \"\"\"\n detail_products: List[Selector] = response.css('.card .card-title a')\n for detail in detail_products:\n href = detail.attrib.get('href')\n yield response.follow(href, callback=self.parse_detail)\n yield {'title': response.css('title::text').get()}\n\n def parse_detail(self, response):\n yield {'title': response.css('title::text').get()}\n",
"step-4": "from typing import List\nimport scrapy\nfrom cssselect import Selector\n\n\nclass RwidSpider(scrapy.Spider):\n name = 'rwid'\n allowed_domains = ['0.0.0.0']\n start_urls = ['http://0.0.0.0:9999/']\n\n def parse(self, response):\n data = {'username': 'user', 'password': 'user12345'}\n return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata\n =data, callback=self.after_login)\n\n def after_login(self, response):\n \"\"\"\n Ada 2 Task disini :\n 1. Ambil semua data barang yang ada dihalaman hasil -> akan menuju detail (parsing detail)\n 2. Ambil semua link next -> akan balik ke self.after_login\n\n :param response:\n :return:\n \"\"\"\n detail_products: List[Selector] = response.css('.card .card-title a')\n for detail in detail_products:\n href = detail.attrib.get('href')\n yield response.follow(href, callback=self.parse_detail)\n yield {'title': response.css('title::text').get()}\n\n def parse_detail(self, response):\n yield {'title': response.css('title::text').get()}\n",
"step-5": "from typing import List\n\nimport scrapy\nfrom cssselect import Selector\n\nclass RwidSpider(scrapy.Spider):\n name = 'rwid'\n allowed_domains = ['0.0.0.0']\n\n # REQUEST LOGIN DARI URLS\n start_urls = ['http://0.0.0.0:9999/']\n\n # LOGIN DISINI\n def parse(self, response):\n # apa bedanya yield & return\n # yield {\"title\": response.css(\"title::text\").get()}\n\n # cek di inspect element perlu login tidak?\n\n data = {\n \"username\": \"user\",\n \"password\": \"user12345\"\n }\n\n # cek di FormRequest butuhnya apa aja\n return scrapy.FormRequest(\n url=\"http://0.0.0.0:9999/login\",\n formdata=data,\n callback=self.after_login # untuk mengektraksi data\n )\n\n def after_login(self, response):\n \"\"\"\n Ada 2 Task disini :\n 1. Ambil semua data barang yang ada dihalaman hasil -> akan menuju detail (parsing detail)\n 2. Ambil semua link next -> akan balik ke self.after_login\n\n :param response:\n :return:\n \"\"\"\n\n # get detail product\n detail_products: List[Selector] = response.css(\".card .card-title a\")\n for detail in detail_products:\n href = detail.attrib.get(\"href\") # untuk mendapatkan urls\n yield response.follow(href, callback=self.parse_detail) # masukkan urls ini ke antrian scrapy\n\n yield {\"title\": response.css(\"title::text\").get()}\n\n def parse_detail(self, response):\n yield {\"title\": response.css(\"title::text\").get()}\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from collections import Counter
import generator.resume_parser as resume_parser
import os
import json
class TestResumeParser(TestCase):
def load_resume(self, resume_name):
path_to_directory = "generator/fixtures/{resume_name}.pdf".format(resume_name=resume_name)
file_path = os.path.abspath(path_to_directory)
json_string = resume_parser.convert(file_path)
json_file = json.loads(json_string)
return json_file
def convert_to_counter(self, json_file):
counter = json_file["counter"]
return Counter(counter)
def generate_counter(self, resume_name):
json_file = self.load_resume(resume_name)
return self.convert_to_counter(json_file)
def generate_name(self, resume_name):
json_file = self.load_resume(resume_name)
return json_file["name"]
def generate_email(self, resume_name):
json_file = self.load_resume(resume_name)
return json_file["email"]
def test_parse_tariq_ali_profile_counter(self):
expected_counter = Counter({'Ruby': 8, 'Rails': 5, 'WordPress': 3, 'Bootstrap': 2, 'JavaScript': 1, 'jQuery': 1, '.NET': 1, 'C#': 1, 'RSpec': 1, 'Sinatra': 1, 'C++': 1, 'Angular': 1, 'Javascript': 1, 'Ethereum': 1, 'blockchain': 1})
actual_counter = self.generate_counter("TariqAliProfile")
self.assertEqual(expected_counter, actual_counter)
def test_parse_tariq_ali_profile_name(self):
expected_name = "Tariq Ali"
actual_name = self.generate_name("TariqAliProfile")
self.assertEqual(expected_name, actual_name)
def test_parse_tariq_ali_profile_email(self):
expected_email = "[email protected]"
actual_email = self.generate_email("TariqAliProfile")
self.assertEqual(expected_email, actual_email)
def test_parse_second_tariq_ali_profile_counter(self):
expected_counter = Counter({'Ruby': 15, 'Rails': 5, 'WordPress': 3, 'Angular': 3, 'Sinatra': 2, 'jQuery': 2, 'JavaScript': 2, 'C++': 2, 'Twitter': 2, 'Javascript': 2, 'Bootstrap': 2, 'GitHub': 1, '.NET': 1, 'RSpec': 1, 'blockchain': 1, 'Ethereum': 1, 'Capistrano': 1, 'AWS': 1, 'C#': 1, 'React': 1})
actual_counter = self.generate_counter("Tariq_Ali")
self.assertEqual(expected_counter, actual_counter)
def test_parse_second_tariq_ali_profile_name(self):
expected_name = "Tariq\xa0Ali"
actual_name = self.generate_name("Tariq_Ali")
self.assertEqual(expected_name, actual_name)
def test_parse_second_tariq_ali_profile_email(self):
expected_email = "[email protected]"
actual_email = self.generate_email("Tariq_Ali")
self.assertEqual(expected_email, actual_email)
def test_parse_dan_bernier_profile_counter(self):
expected_counter = Counter({'Ruby': 7, 'Processing': 4, 'C#': 3, 'Rails': 2, 'Javascript': 1, '.NET': 1, 'JavaScript': 1, 'Scheme': 1})
actual_counter = self.generate_counter("DanBernierProfile")
self.assertEqual(expected_counter, actual_counter)
def test_parse_dan_bernier_profile_name(self):
expected_name = "Dan Bernier"
actual_name = self.generate_name("DanBernierProfile")
self.assertEqual(expected_name, actual_name)
def test_parse_dan_bernier_profile_email(self):
expected_email = "[email protected]"
actual_email = self.generate_email("DanBernierProfile")
self.assertEqual(expected_email, actual_email)
def test_parse_dylan_hirschkorn_profile_counter(self):
expected_counter = Counter({'Dylan': 3, 'Visual Basic': 3, 'BASIC': 3, 'C#': 2, 'Swift': 1})
# This is a bug, Dylan only mentioned "Visual Basic", not "Basic" on his resume. However, I do not know of a good way of fixing this specific edge case. Also, Dylan is the name of a programming language, which is why Dylan shows up in the counter.
actual_counter = self.generate_counter("DylanHirschkornProfile")
self.assertEqual(expected_counter, actual_counter)
def test_parse_dylan_hirschkorn_profile_name(self):
expected_name = "Dylan Hirschkorn"
actual_name = self.generate_name("DylanHirschkornProfile")
self.assertEqual(expected_name, actual_name)
def test_parse_dylan_hirschkorn_profile_email(self):
expected_email = ""
actual_email = self.generate_email("DylanHirschkornProfile")
self.assertEqual(expected_email, actual_email)
def test_parse_sean_dugan_murphy_profile_counter(self):
expected_counter = Counter({'Swift': 11, 'Twitter': 3, 'Objective-C': 3, 'Facebook': 3, 'GitHub': 2, 'YouTube': 2, 'CSS': 1, 'C#': 1})
actual_counter = self.generate_counter("SeanDuganMurphyProfile")
self.assertEqual(expected_counter, actual_counter)
def test_parse_sean_dugan_murphy_profile_name(self):
# The full name of the candidate is Sean Dugan Murphy. However we assume that a candidate only has a first and last name...and ignore the edge case where a candidate has a middle name.
expected_name = "Sean Dugan"
actual_name = self.generate_name("SeanDuganMurphyProfile")
self.assertEqual(expected_name, actual_name)
def test_parse_sean_dugan_murphy_profile_email(self):
expected_email = ""
actual_email = self.generate_email("SeanDuganMurphyProfile")
self.assertEqual(expected_email, actual_email)
def test_parse_christopher_salat_ceev_counter(self):
# Note that Christopher Salat does not actually know either PHP or Scratch. He links to several websites that end with the .php extension and he serves as a Scratch DJ. This indicates a problem with relying solely on keywords detached from the context.
expected_counter = Counter({'YouTube': 5, 'PHP': 2, 'Scratch': 1})
actual_counter = self.generate_counter("Christopher_Salat_Ceev")
self.assertEqual(expected_counter, actual_counter)
def test_parse_christopher_salat_ceev_name(self):
expected_name = "Christopher Salat"
actual_name = self.generate_name("Christopher_Salat_Ceev")
self.assertEqual(expected_name, actual_name)
def test_parse_christopher_salat_ceev_email(self):
expected_email = "[email protected]"
actual_email = self.generate_email("Christopher_Salat_Ceev")
self.assertEqual(expected_email, actual_email)
|
normal
|
{
"blob_id": "4bbfb35e4b03e2bfd46dd0fe5bfd54fb01ba11df",
"index": 1996,
"step-1": "<mask token>\n\n\nclass TestResumeParser(TestCase):\n <mask token>\n <mask token>\n\n def generate_counter(self, resume_name):\n json_file = self.load_resume(resume_name)\n return self.convert_to_counter(json_file)\n <mask token>\n\n def generate_email(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['email']\n <mask token>\n <mask token>\n\n def test_parse_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('TariqAliProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_second_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 15, 'Rails': 5, 'WordPress': 3,\n 'Angular': 3, 'Sinatra': 2, 'jQuery': 2, 'JavaScript': 2, 'C++':\n 2, 'Twitter': 2, 'Javascript': 2, 'Bootstrap': 2, 'GitHub': 1,\n '.NET': 1, 'RSpec': 1, 'blockchain': 1, 'Ethereum': 1,\n 'Capistrano': 1, 'AWS': 1, 'C#': 1, 'React': 1})\n actual_counter = self.generate_counter('Tariq_Ali')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_second_tariq_ali_profile_name(self):\n expected_name = 'Tariq\\xa0Ali'\n actual_name = self.generate_name('Tariq_Ali')\n self.assertEqual(expected_name, actual_name)\n <mask token>\n\n def test_parse_dan_bernier_profile_counter(self):\n expected_counter = Counter({'Ruby': 7, 'Processing': 4, 'C#': 3,\n 'Rails': 2, 'Javascript': 1, '.NET': 1, 'JavaScript': 1,\n 'Scheme': 1})\n actual_counter = self.generate_counter('DanBernierProfile')\n self.assertEqual(expected_counter, actual_counter)\n <mask token>\n <mask token>\n <mask token>\n\n def test_parse_dylan_hirschkorn_profile_name(self):\n expected_name = 'Dylan Hirschkorn'\n actual_name = self.generate_name('DylanHirschkornProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dylan_hirschkorn_profile_email(self):\n expected_email = ''\n actual_email = self.generate_email('DylanHirschkornProfile')\n self.assertEqual(expected_email, actual_email)\n <mask token>\n\n def test_parse_sean_dugan_murphy_profile_name(self):\n expected_name = 'Sean Dugan'\n actual_name = self.generate_name('SeanDuganMurphyProfile')\n self.assertEqual(expected_name, actual_name)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestResumeParser(TestCase):\n\n def load_resume(self, resume_name):\n path_to_directory = 'generator/fixtures/{resume_name}.pdf'.format(\n resume_name=resume_name)\n file_path = os.path.abspath(path_to_directory)\n json_string = resume_parser.convert(file_path)\n json_file = json.loads(json_string)\n return json_file\n\n def convert_to_counter(self, json_file):\n counter = json_file['counter']\n return Counter(counter)\n\n def generate_counter(self, resume_name):\n json_file = self.load_resume(resume_name)\n return self.convert_to_counter(json_file)\n\n def generate_name(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['name']\n\n def generate_email(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['email']\n\n def test_parse_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 8, 'Rails': 5, 'WordPress': 3,\n 'Bootstrap': 2, 'JavaScript': 1, 'jQuery': 1, '.NET': 1, 'C#': \n 1, 'RSpec': 1, 'Sinatra': 1, 'C++': 1, 'Angular': 1,\n 'Javascript': 1, 'Ethereum': 1, 'blockchain': 1})\n actual_counter = self.generate_counter('TariqAliProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_tariq_ali_profile_name(self):\n expected_name = 'Tariq Ali'\n actual_name = self.generate_name('TariqAliProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('TariqAliProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_second_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 15, 'Rails': 5, 'WordPress': 3,\n 'Angular': 3, 'Sinatra': 2, 'jQuery': 2, 'JavaScript': 2, 'C++':\n 2, 'Twitter': 2, 'Javascript': 2, 'Bootstrap': 2, 'GitHub': 1,\n '.NET': 1, 'RSpec': 1, 'blockchain': 1, 'Ethereum': 1,\n 'Capistrano': 1, 'AWS': 1, 'C#': 1, 'React': 1})\n actual_counter = self.generate_counter('Tariq_Ali')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_second_tariq_ali_profile_name(self):\n expected_name = 'Tariq\\xa0Ali'\n actual_name = self.generate_name('Tariq_Ali')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_second_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('Tariq_Ali')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dan_bernier_profile_counter(self):\n expected_counter = Counter({'Ruby': 7, 'Processing': 4, 'C#': 3,\n 'Rails': 2, 'Javascript': 1, '.NET': 1, 'JavaScript': 1,\n 'Scheme': 1})\n actual_counter = self.generate_counter('DanBernierProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dan_bernier_profile_name(self):\n expected_name = 'Dan Bernier'\n actual_name = self.generate_name('DanBernierProfile')\n self.assertEqual(expected_name, actual_name)\n <mask token>\n\n def test_parse_dylan_hirschkorn_profile_counter(self):\n expected_counter = Counter({'Dylan': 3, 'Visual Basic': 3, 'BASIC':\n 3, 'C#': 2, 'Swift': 1})\n actual_counter = self.generate_counter('DylanHirschkornProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dylan_hirschkorn_profile_name(self):\n expected_name = 'Dylan Hirschkorn'\n actual_name = self.generate_name('DylanHirschkornProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dylan_hirschkorn_profile_email(self):\n expected_email = ''\n actual_email = 
self.generate_email('DylanHirschkornProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_sean_dugan_murphy_profile_counter(self):\n expected_counter = Counter({'Swift': 11, 'Twitter': 3,\n 'Objective-C': 3, 'Facebook': 3, 'GitHub': 2, 'YouTube': 2,\n 'CSS': 1, 'C#': 1})\n actual_counter = self.generate_counter('SeanDuganMurphyProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_sean_dugan_murphy_profile_name(self):\n expected_name = 'Sean Dugan'\n actual_name = self.generate_name('SeanDuganMurphyProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_sean_dugan_murphy_profile_email(self):\n expected_email = ''\n actual_email = self.generate_email('SeanDuganMurphyProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_christopher_salat_ceev_counter(self):\n expected_counter = Counter({'YouTube': 5, 'PHP': 2, 'Scratch': 1})\n actual_counter = self.generate_counter('Christopher_Salat_Ceev')\n self.assertEqual(expected_counter, actual_counter)\n <mask token>\n\n def test_parse_christopher_salat_ceev_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('Christopher_Salat_Ceev')\n self.assertEqual(expected_email, actual_email)\n",
"step-3": "<mask token>\n\n\nclass TestResumeParser(TestCase):\n\n def load_resume(self, resume_name):\n path_to_directory = 'generator/fixtures/{resume_name}.pdf'.format(\n resume_name=resume_name)\n file_path = os.path.abspath(path_to_directory)\n json_string = resume_parser.convert(file_path)\n json_file = json.loads(json_string)\n return json_file\n\n def convert_to_counter(self, json_file):\n counter = json_file['counter']\n return Counter(counter)\n\n def generate_counter(self, resume_name):\n json_file = self.load_resume(resume_name)\n return self.convert_to_counter(json_file)\n\n def generate_name(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['name']\n\n def generate_email(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['email']\n\n def test_parse_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 8, 'Rails': 5, 'WordPress': 3,\n 'Bootstrap': 2, 'JavaScript': 1, 'jQuery': 1, '.NET': 1, 'C#': \n 1, 'RSpec': 1, 'Sinatra': 1, 'C++': 1, 'Angular': 1,\n 'Javascript': 1, 'Ethereum': 1, 'blockchain': 1})\n actual_counter = self.generate_counter('TariqAliProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_tariq_ali_profile_name(self):\n expected_name = 'Tariq Ali'\n actual_name = self.generate_name('TariqAliProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('TariqAliProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_second_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 15, 'Rails': 5, 'WordPress': 3,\n 'Angular': 3, 'Sinatra': 2, 'jQuery': 2, 'JavaScript': 2, 'C++':\n 2, 'Twitter': 2, 'Javascript': 2, 'Bootstrap': 2, 'GitHub': 1,\n '.NET': 1, 'RSpec': 1, 'blockchain': 1, 'Ethereum': 1,\n 'Capistrano': 1, 'AWS': 1, 'C#': 1, 'React': 1})\n actual_counter = self.generate_counter('Tariq_Ali')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_second_tariq_ali_profile_name(self):\n expected_name = 'Tariq\\xa0Ali'\n actual_name = self.generate_name('Tariq_Ali')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_second_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('Tariq_Ali')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dan_bernier_profile_counter(self):\n expected_counter = Counter({'Ruby': 7, 'Processing': 4, 'C#': 3,\n 'Rails': 2, 'Javascript': 1, '.NET': 1, 'JavaScript': 1,\n 'Scheme': 1})\n actual_counter = self.generate_counter('DanBernierProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dan_bernier_profile_name(self):\n expected_name = 'Dan Bernier'\n actual_name = self.generate_name('DanBernierProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dan_bernier_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('DanBernierProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dylan_hirschkorn_profile_counter(self):\n expected_counter = Counter({'Dylan': 3, 'Visual Basic': 3, 'BASIC':\n 3, 'C#': 2, 'Swift': 1})\n actual_counter = self.generate_counter('DylanHirschkornProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dylan_hirschkorn_profile_name(self):\n expected_name = 'Dylan Hirschkorn'\n actual_name = 
self.generate_name('DylanHirschkornProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dylan_hirschkorn_profile_email(self):\n expected_email = ''\n actual_email = self.generate_email('DylanHirschkornProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_sean_dugan_murphy_profile_counter(self):\n expected_counter = Counter({'Swift': 11, 'Twitter': 3,\n 'Objective-C': 3, 'Facebook': 3, 'GitHub': 2, 'YouTube': 2,\n 'CSS': 1, 'C#': 1})\n actual_counter = self.generate_counter('SeanDuganMurphyProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_sean_dugan_murphy_profile_name(self):\n expected_name = 'Sean Dugan'\n actual_name = self.generate_name('SeanDuganMurphyProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_sean_dugan_murphy_profile_email(self):\n expected_email = ''\n actual_email = self.generate_email('SeanDuganMurphyProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_christopher_salat_ceev_counter(self):\n expected_counter = Counter({'YouTube': 5, 'PHP': 2, 'Scratch': 1})\n actual_counter = self.generate_counter('Christopher_Salat_Ceev')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_christopher_salat_ceev_name(self):\n expected_name = 'Christopher Salat'\n actual_name = self.generate_name('Christopher_Salat_Ceev')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_christopher_salat_ceev_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('Christopher_Salat_Ceev')\n self.assertEqual(expected_email, actual_email)\n",
"step-4": "from __future__ import unicode_literals\nfrom django.test import TestCase\nfrom collections import Counter\nimport generator.resume_parser as resume_parser\nimport os\nimport json\n\n\nclass TestResumeParser(TestCase):\n\n def load_resume(self, resume_name):\n path_to_directory = 'generator/fixtures/{resume_name}.pdf'.format(\n resume_name=resume_name)\n file_path = os.path.abspath(path_to_directory)\n json_string = resume_parser.convert(file_path)\n json_file = json.loads(json_string)\n return json_file\n\n def convert_to_counter(self, json_file):\n counter = json_file['counter']\n return Counter(counter)\n\n def generate_counter(self, resume_name):\n json_file = self.load_resume(resume_name)\n return self.convert_to_counter(json_file)\n\n def generate_name(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['name']\n\n def generate_email(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file['email']\n\n def test_parse_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 8, 'Rails': 5, 'WordPress': 3,\n 'Bootstrap': 2, 'JavaScript': 1, 'jQuery': 1, '.NET': 1, 'C#': \n 1, 'RSpec': 1, 'Sinatra': 1, 'C++': 1, 'Angular': 1,\n 'Javascript': 1, 'Ethereum': 1, 'blockchain': 1})\n actual_counter = self.generate_counter('TariqAliProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_tariq_ali_profile_name(self):\n expected_name = 'Tariq Ali'\n actual_name = self.generate_name('TariqAliProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('TariqAliProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_second_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 15, 'Rails': 5, 'WordPress': 3,\n 'Angular': 3, 'Sinatra': 2, 'jQuery': 2, 'JavaScript': 2, 'C++':\n 2, 'Twitter': 2, 'Javascript': 2, 'Bootstrap': 2, 'GitHub': 1,\n '.NET': 1, 'RSpec': 1, 'blockchain': 1, 'Ethereum': 1,\n 'Capistrano': 1, 'AWS': 1, 'C#': 1, 'React': 1})\n actual_counter = self.generate_counter('Tariq_Ali')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_second_tariq_ali_profile_name(self):\n expected_name = 'Tariq\\xa0Ali'\n actual_name = self.generate_name('Tariq_Ali')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_second_tariq_ali_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('Tariq_Ali')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dan_bernier_profile_counter(self):\n expected_counter = Counter({'Ruby': 7, 'Processing': 4, 'C#': 3,\n 'Rails': 2, 'Javascript': 1, '.NET': 1, 'JavaScript': 1,\n 'Scheme': 1})\n actual_counter = self.generate_counter('DanBernierProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dan_bernier_profile_name(self):\n expected_name = 'Dan Bernier'\n actual_name = self.generate_name('DanBernierProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dan_bernier_profile_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('DanBernierProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dylan_hirschkorn_profile_counter(self):\n expected_counter = Counter({'Dylan': 3, 'Visual Basic': 3, 'BASIC':\n 3, 'C#': 2, 'Swift': 1})\n actual_counter = self.generate_counter('DylanHirschkornProfile')\n 
self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dylan_hirschkorn_profile_name(self):\n expected_name = 'Dylan Hirschkorn'\n actual_name = self.generate_name('DylanHirschkornProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dylan_hirschkorn_profile_email(self):\n expected_email = ''\n actual_email = self.generate_email('DylanHirschkornProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_sean_dugan_murphy_profile_counter(self):\n expected_counter = Counter({'Swift': 11, 'Twitter': 3,\n 'Objective-C': 3, 'Facebook': 3, 'GitHub': 2, 'YouTube': 2,\n 'CSS': 1, 'C#': 1})\n actual_counter = self.generate_counter('SeanDuganMurphyProfile')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_sean_dugan_murphy_profile_name(self):\n expected_name = 'Sean Dugan'\n actual_name = self.generate_name('SeanDuganMurphyProfile')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_sean_dugan_murphy_profile_email(self):\n expected_email = ''\n actual_email = self.generate_email('SeanDuganMurphyProfile')\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_christopher_salat_ceev_counter(self):\n expected_counter = Counter({'YouTube': 5, 'PHP': 2, 'Scratch': 1})\n actual_counter = self.generate_counter('Christopher_Salat_Ceev')\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_christopher_salat_ceev_name(self):\n expected_name = 'Christopher Salat'\n actual_name = self.generate_name('Christopher_Salat_Ceev')\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_christopher_salat_ceev_email(self):\n expected_email = '[email protected]'\n actual_email = self.generate_email('Christopher_Salat_Ceev')\n self.assertEqual(expected_email, actual_email)\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.test import TestCase\n\nfrom collections import Counter\n\nimport generator.resume_parser as resume_parser\nimport os\nimport json\n\nclass TestResumeParser(TestCase):\n def load_resume(self, resume_name):\n path_to_directory = \"generator/fixtures/{resume_name}.pdf\".format(resume_name=resume_name)\n file_path = os.path.abspath(path_to_directory)\n json_string = resume_parser.convert(file_path)\n json_file = json.loads(json_string)\n return json_file\n\n def convert_to_counter(self, json_file):\n counter = json_file[\"counter\"]\n return Counter(counter)\n\n def generate_counter(self, resume_name):\n json_file = self.load_resume(resume_name)\n return self.convert_to_counter(json_file)\n\n def generate_name(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file[\"name\"]\n\n def generate_email(self, resume_name):\n json_file = self.load_resume(resume_name)\n return json_file[\"email\"]\n\n def test_parse_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 8, 'Rails': 5, 'WordPress': 3, 'Bootstrap': 2, 'JavaScript': 1, 'jQuery': 1, '.NET': 1, 'C#': 1, 'RSpec': 1, 'Sinatra': 1, 'C++': 1, 'Angular': 1, 'Javascript': 1, 'Ethereum': 1, 'blockchain': 1})\n actual_counter = self.generate_counter(\"TariqAliProfile\")\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_tariq_ali_profile_name(self):\n expected_name = \"Tariq Ali\"\n actual_name = self.generate_name(\"TariqAliProfile\")\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_tariq_ali_profile_email(self):\n expected_email = \"[email protected]\"\n actual_email = self.generate_email(\"TariqAliProfile\")\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_second_tariq_ali_profile_counter(self):\n expected_counter = Counter({'Ruby': 15, 'Rails': 5, 'WordPress': 3, 'Angular': 3, 'Sinatra': 2, 'jQuery': 2, 'JavaScript': 2, 'C++': 2, 'Twitter': 2, 'Javascript': 2, 'Bootstrap': 2, 'GitHub': 1, '.NET': 1, 'RSpec': 1, 'blockchain': 1, 'Ethereum': 1, 'Capistrano': 1, 'AWS': 1, 'C#': 1, 'React': 1})\n actual_counter = self.generate_counter(\"Tariq_Ali\")\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_second_tariq_ali_profile_name(self):\n expected_name = \"Tariq\\xa0Ali\"\n actual_name = self.generate_name(\"Tariq_Ali\")\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_second_tariq_ali_profile_email(self):\n expected_email = \"[email protected]\"\n actual_email = self.generate_email(\"Tariq_Ali\")\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dan_bernier_profile_counter(self):\n expected_counter = Counter({'Ruby': 7, 'Processing': 4, 'C#': 3, 'Rails': 2, 'Javascript': 1, '.NET': 1, 'JavaScript': 1, 'Scheme': 1})\n actual_counter = self.generate_counter(\"DanBernierProfile\")\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dan_bernier_profile_name(self):\n expected_name = \"Dan Bernier\"\n actual_name = self.generate_name(\"DanBernierProfile\")\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dan_bernier_profile_email(self):\n expected_email = \"[email protected]\"\n actual_email = self.generate_email(\"DanBernierProfile\")\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_dylan_hirschkorn_profile_counter(self):\n expected_counter = Counter({'Dylan': 3, 'Visual Basic': 3, 'BASIC': 3, 'C#': 2, 'Swift': 1})\n # This is a bug, Dylan only mentioned 
\"Visual Basic\", not \"Basic\" on his resume. However, I do not know of a good way of fixing this specific edge case. Also, Dylan is the name of a programming language, which is why Dylan shows up in the counter.\n actual_counter = self.generate_counter(\"DylanHirschkornProfile\")\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_dylan_hirschkorn_profile_name(self):\n expected_name = \"Dylan Hirschkorn\"\n actual_name = self.generate_name(\"DylanHirschkornProfile\")\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_dylan_hirschkorn_profile_email(self):\n expected_email = \"\"\n actual_email = self.generate_email(\"DylanHirschkornProfile\")\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_sean_dugan_murphy_profile_counter(self):\n expected_counter = Counter({'Swift': 11, 'Twitter': 3, 'Objective-C': 3, 'Facebook': 3, 'GitHub': 2, 'YouTube': 2, 'CSS': 1, 'C#': 1})\n actual_counter = self.generate_counter(\"SeanDuganMurphyProfile\")\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_sean_dugan_murphy_profile_name(self):\n # The full name of the candidate is Sean Dugan Murphy. However we assume that a candidate only has a first and last name...and ignore the edge case where a candidate has a middle name.\n expected_name = \"Sean Dugan\"\n actual_name = self.generate_name(\"SeanDuganMurphyProfile\")\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_sean_dugan_murphy_profile_email(self):\n expected_email = \"\"\n actual_email = self.generate_email(\"SeanDuganMurphyProfile\")\n self.assertEqual(expected_email, actual_email)\n\n def test_parse_christopher_salat_ceev_counter(self):\n # Note that Christopher Salat does not actually know either PHP or Scratch. He links to several websites that end with the .php extension and he serves as a Scratch DJ. This indicates a problem with relying solely on keywords detached from the context.\n expected_counter = Counter({'YouTube': 5, 'PHP': 2, 'Scratch': 1})\n actual_counter = self.generate_counter(\"Christopher_Salat_Ceev\")\n self.assertEqual(expected_counter, actual_counter)\n\n def test_parse_christopher_salat_ceev_name(self):\n expected_name = \"Christopher Salat\"\n actual_name = self.generate_name(\"Christopher_Salat_Ceev\")\n self.assertEqual(expected_name, actual_name)\n\n def test_parse_christopher_salat_ceev_email(self):\n expected_email = \"[email protected]\"\n actual_email = self.generate_email(\"Christopher_Salat_Ceev\")\n self.assertEqual(expected_email, actual_email)\n",
"step-ids": [
10,
22,
24,
25,
26
]
}
|
[
10,
22,
24,
25,
26
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##########
# websocket-client
# https://pypi.python.org/pypi/websocket-client/
# sudo -H pip install websocket-client
#####
from websocket import create_connection
ws = create_connection( "ws://192.168.1.132:81/python" )
msg = '#0000FF'
print "Envoi d’un message à l’ESP"
print( msg )
ws.send( msg )
print "Fin de l’envoi\n"
print "Réception..."
result = ws.recv()
print "Reçu : '%s'" % result
ws.close()
|
normal
|
{
"blob_id": "3b26181097025add5919e752aa53e57eea49c943",
"index": 4923,
"step-1": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n##########\n# websocket-client\n# https://pypi.python.org/pypi/websocket-client/\n# sudo -H pip install websocket-client\n#####\n\nfrom websocket import create_connection\nws = create_connection( \"ws://192.168.1.132:81/python\" )\n\nmsg = '#0000FF'\nprint \"Envoi d’un message à l’ESP\"\nprint( msg )\nws.send( msg )\nprint \"Fin de l’envoi\\n\"\n\nprint \"Réception...\"\nresult = ws.recv()\nprint \"Reçu : '%s'\" % result\nws.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""empty message
Revision ID: 0bb5933fe69f
Revises: 09c6fdb3cf81
Create Date: 2021-03-11 16:48:06.771046
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0bb5933fe69f'
down_revision = '09c6fdb3cf81'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))
op.create_unique_constraint(None, 'user', ['password'])
op.create_unique_constraint(None, 'user', ['email'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'user', type_='unique')
op.drop_constraint(None, 'user', type_='unique')
op.drop_column('user', 'money')
# ### end Alembic commands ###
|
normal
|
{
"blob_id": "f727c0551f20fb0dc72b4d81b7b3ed8ce9b1b6f4",
"index": 2072,
"step-1": "<mask token>\n\n\ndef downgrade():\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_column('user', 'money')\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))\n op.create_unique_constraint(None, 'user', ['password'])\n op.create_unique_constraint(None, 'user', ['email'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_column('user', 'money')\n",
"step-3": "<mask token>\nrevision = '0bb5933fe69f'\ndown_revision = '09c6fdb3cf81'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))\n op.create_unique_constraint(None, 'user', ['password'])\n op.create_unique_constraint(None, 'user', ['email'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_column('user', 'money')\n",
"step-4": "<mask token>\nfrom alembic import op\nimport sqlalchemy as sa\nrevision = '0bb5933fe69f'\ndown_revision = '09c6fdb3cf81'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))\n op.create_unique_constraint(None, 'user', ['password'])\n op.create_unique_constraint(None, 'user', ['email'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_column('user', 'money')\n",
"step-5": "\"\"\"empty message\n\nRevision ID: 0bb5933fe69f\nRevises: 09c6fdb3cf81\nCreate Date: 2021-03-11 16:48:06.771046\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0bb5933fe69f'\ndown_revision = '09c6fdb3cf81'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('money', sa.Integer(), nullable=False))\n op.create_unique_constraint(None, 'user', ['password'])\n op.create_unique_constraint(None, 'user', ['email'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_constraint(None, 'user', type_='unique')\n op.drop_column('user', 'money')\n # ### end Alembic commands ###\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.views import generic
from .models import GPS
# This is the view for my home page. It is a list view because it needs to display a list of all
# of the GPS units that are currently in the database.
class HomeView(generic.ListView):
model = GPS
template_name = 'inv_templates/home.html'
context_object_name = 'unit'
# This is the view for my add item page.
class Add_ItemView(generic.TemplateView):
model = GPS
template_name = 'inv_templates/add_item.html'
# This is the view for my remove item page. It is a list view because it needs to display a
# list of all of the GPS units that are currently in the database.
class Remove_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/remove_item.html'
context_object_name = 'unit'
# This is the view for my update item page. It is a list view because it needs to display a
# list of all of the GPS units that are currently in the database.
class Update_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/update_item.html'
context_object_name = 'unit'
# This is the view for my check out item page. It is a list view because it needs to display a
# list of all of the GPS units that are currently checked in.
class Check_Out_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/check_out_item.html'
context_object_name = 'checkedin_units'
queryset = GPS.objects.filter(status=False)
# This is the view for my check in item page. It is a list view because it needs to display a
# list of all of the GPS units that are currently checked out.
class Check_In_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/check_in_item.html'
context_object_name = 'checkedout_units'
queryset = GPS.objects.filter(status=True)
|
normal
|
{
"blob_id": "67db3a66e5525d41de13df665167a0db2d81056e",
"index": 2721,
"step-1": "<mask token>\n\n\nclass Remove_ItemView(generic.ListView):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Update_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/update_item.html'\n context_object_name = 'unit'\n\n\nclass Check_Out_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_out_item.html'\n context_object_name = 'checkedin_units'\n queryset = GPS.objects.filter(status=False)\n\n\nclass Check_In_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_in_item.html'\n context_object_name = 'checkedout_units'\n queryset = GPS.objects.filter(status=True)\n",
"step-2": "<mask token>\n\n\nclass HomeView(generic.ListView):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Add_ItemView(generic.TemplateView):\n model = GPS\n template_name = 'inv_templates/add_item.html'\n\n\nclass Remove_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/remove_item.html'\n context_object_name = 'unit'\n\n\nclass Update_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/update_item.html'\n context_object_name = 'unit'\n\n\nclass Check_Out_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_out_item.html'\n context_object_name = 'checkedin_units'\n queryset = GPS.objects.filter(status=False)\n\n\nclass Check_In_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_in_item.html'\n context_object_name = 'checkedout_units'\n queryset = GPS.objects.filter(status=True)\n",
"step-3": "<mask token>\n\n\nclass HomeView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/home.html'\n context_object_name = 'unit'\n\n\nclass Add_ItemView(generic.TemplateView):\n model = GPS\n template_name = 'inv_templates/add_item.html'\n\n\nclass Remove_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/remove_item.html'\n context_object_name = 'unit'\n\n\nclass Update_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/update_item.html'\n context_object_name = 'unit'\n\n\nclass Check_Out_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_out_item.html'\n context_object_name = 'checkedin_units'\n queryset = GPS.objects.filter(status=False)\n\n\nclass Check_In_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_in_item.html'\n context_object_name = 'checkedout_units'\n queryset = GPS.objects.filter(status=True)\n",
"step-4": "from django.views import generic\nfrom .models import GPS\n\n\nclass HomeView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/home.html'\n context_object_name = 'unit'\n\n\nclass Add_ItemView(generic.TemplateView):\n model = GPS\n template_name = 'inv_templates/add_item.html'\n\n\nclass Remove_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/remove_item.html'\n context_object_name = 'unit'\n\n\nclass Update_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/update_item.html'\n context_object_name = 'unit'\n\n\nclass Check_Out_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_out_item.html'\n context_object_name = 'checkedin_units'\n queryset = GPS.objects.filter(status=False)\n\n\nclass Check_In_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_in_item.html'\n context_object_name = 'checkedout_units'\n queryset = GPS.objects.filter(status=True)\n",
"step-5": "from django.views import generic\nfrom .models import GPS\n# This is the view for my home page. It is a list view because it needs to display a list of all\n# of the GPS units that are currently in the database.\nclass HomeView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/home.html'\n context_object_name = 'unit'\n\n# This is the view for my add item page.\nclass Add_ItemView(generic.TemplateView):\n model = GPS\n template_name = 'inv_templates/add_item.html'\n\n# This is the view for my remove item page. It is a list view because it needs to display a\n# list of all of the GPS units that are currently in the database.\nclass Remove_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/remove_item.html'\n context_object_name = 'unit'\n\n# This is the view for my update item page. It is a list view because it needs to display a\n# list of all of the GPS units that are currently in the database.\nclass Update_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/update_item.html'\n context_object_name = 'unit'\n\n# This is the view for my check out item page. It is a list view because it needs to display a\n# list of all of the GPS units that are currently checked in.\nclass Check_Out_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_out_item.html'\n context_object_name = 'checkedin_units'\n queryset = GPS.objects.filter(status=False)\n\n# This is the view for my check in item page. It is a list view because it needs to display a\n# list of all of the GPS units that are currently checked out.\nclass Check_In_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_in_item.html'\n context_object_name = 'checkedout_units'\n queryset = GPS.objects.filter(status=True)\n",
"step-ids": [
7,
11,
12,
13,
14
]
}
|
[
7,
11,
12,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def something3():
x = session.query(models.Review).filter(models.Review.time < end_time
).count()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
something1
<|reserved_special_token_0|>
something2
<|reserved_special_token_0|>
def something3():
x = session.query(models.Review).filter(models.Review.time < end_time
).count()
something4
<|reserved_special_token_0|>
something5
<|reserved_special_token_1|>
something1
x = session.query(x).filter(y).count()
something2
y = session.query(models.User, models.X).filter(models.User.time >
start_time, models.User.id == user_id).count()
def something3():
x = session.query(models.Review).filter(models.Review.time < end_time
).count()
something4
x = session.query(x, y).filter(bla).count()
x = session.query(x.X, y).filter(y > user_id).count()
x = session.query(x.X, y.Y).filter(x.X == 5).count()
something5
<|reserved_special_token_1|>
something1
x = session.query(x).filter(y).count()
something2
y = session.query(
models.User, models.X,
).filter(
models.User.time > start_time,
models.User.id == user_id,
).count()
def something3():
x = session.query(
models.Review,
).filter(
models.Review.time < end_time,
).count()
something4
x = session.query(x, y).filter(bla).count()
x = session.query(x.X, y).filter(y > user_id).count()
x = session.query(
x.X, y.Y
).filter(x.X == 5).count()
something5
|
flexible
|
{
"blob_id": "5b91b7025b0e574d45f95a0585128018d83c17ea",
"index": 563,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef something3():\n x = session.query(models.Review).filter(models.Review.time < end_time\n ).count()\n\n\n<mask token>\n",
"step-3": "something1\n<mask token>\nsomething2\n<mask token>\n\n\ndef something3():\n x = session.query(models.Review).filter(models.Review.time < end_time\n ).count()\n\n\nsomething4\n<mask token>\nsomething5\n",
"step-4": "something1\nx = session.query(x).filter(y).count()\nsomething2\ny = session.query(models.User, models.X).filter(models.User.time >\n start_time, models.User.id == user_id).count()\n\n\ndef something3():\n x = session.query(models.Review).filter(models.Review.time < end_time\n ).count()\n\n\nsomething4\nx = session.query(x, y).filter(bla).count()\nx = session.query(x.X, y).filter(y > user_id).count()\nx = session.query(x.X, y.Y).filter(x.X == 5).count()\nsomething5\n",
"step-5": "something1\nx = session.query(x).filter(y).count()\nsomething2\ny = session.query(\n models.User, models.X,\n).filter(\n models.User.time > start_time,\n models.User.id == user_id,\n).count()\ndef something3():\n x = session.query(\n models.Review,\n ).filter(\n models.Review.time < end_time,\n ).count()\nsomething4\nx = session.query(x, y).filter(bla).count()\nx = session.query(x.X, y).filter(y > user_id).count()\nx = session.query(\n x.X, y.Y\n).filter(x.X == 5).count()\nsomething5\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StonewallConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StonewallConfig(AppConfig):
name = 'stonewall'
<|reserved_special_token_1|>
from django.apps import AppConfig
class StonewallConfig(AppConfig):
name = 'stonewall'
|
flexible
|
{
"blob_id": "8364264851895ccabeb74fd3fab1d4f39da717f8",
"index": 8398,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass StonewallConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass StonewallConfig(AppConfig):\n name = 'stonewall'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass StonewallConfig(AppConfig):\n name = 'stonewall'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_nearest_method(method_name, parser):
"""
all candidates toked
all protocol untoked
input:
queries:
[
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
]
output:
[
nearest_idx1,
nearest_idx2,
nearest_idx3,
...
]
"""
return _method_adaptors[method_name](parser)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_nearest_method(method_name, parser):
"""
all candidates toked
all protocol untoked
input:
queries:
[
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
]
output:
[
nearest_idx1,
nearest_idx2,
nearest_idx3,
...
]
"""
return _method_adaptors[method_name](parser)
def get_method_names():
return list(_method_adaptors.keys())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def register_dist_adaptor(method_name):
def decorator(func):
_method_adaptors[method_name] = func
def wrapper(*args, **kwargs):
func(*args, **kwargs)
return wrapper
return decorator
def get_nearest_method(method_name, parser):
"""
all candidates toked
all protocol untoked
input:
queries:
[
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
]
output:
[
nearest_idx1,
nearest_idx2,
nearest_idx3,
...
]
"""
return _method_adaptors[method_name](parser)
def get_method_names():
return list(_method_adaptors.keys())
<|reserved_special_token_1|>
_method_adaptors = dict()
def register_dist_adaptor(method_name):
def decorator(func):
_method_adaptors[method_name] = func
def wrapper(*args, **kwargs):
func(*args, **kwargs)
return wrapper
return decorator
def get_nearest_method(method_name, parser):
"""
all candidates toked
all protocol untoked
input:
queries:
[
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
]
output:
[
nearest_idx1,
nearest_idx2,
nearest_idx3,
...
]
"""
return _method_adaptors[method_name](parser)
def get_method_names():
return list(_method_adaptors.keys())
|
flexible
|
{
"blob_id": "ed2f3bbc7eb0a4d8f5ccdb7a12e00cbddab04dd0",
"index": 577,
"step-1": "<mask token>\n\n\ndef get_nearest_method(method_name, parser):\n \"\"\"\n all candidates toked\n all protocol untoked\n input:\n queries:\n [\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n ]\n output:\n [\n nearest_idx1,\n nearest_idx2,\n nearest_idx3,\n ...\n ]\n \"\"\"\n return _method_adaptors[method_name](parser)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_nearest_method(method_name, parser):\n \"\"\"\n all candidates toked\n all protocol untoked\n input:\n queries:\n [\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n ]\n output:\n [\n nearest_idx1,\n nearest_idx2,\n nearest_idx3,\n ...\n ]\n \"\"\"\n return _method_adaptors[method_name](parser)\n\n\ndef get_method_names():\n return list(_method_adaptors.keys())\n",
"step-3": "<mask token>\n\n\ndef register_dist_adaptor(method_name):\n\n def decorator(func):\n _method_adaptors[method_name] = func\n\n def wrapper(*args, **kwargs):\n func(*args, **kwargs)\n return wrapper\n return decorator\n\n\ndef get_nearest_method(method_name, parser):\n \"\"\"\n all candidates toked\n all protocol untoked\n input:\n queries:\n [\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n ]\n output:\n [\n nearest_idx1,\n nearest_idx2,\n nearest_idx3,\n ...\n ]\n \"\"\"\n return _method_adaptors[method_name](parser)\n\n\ndef get_method_names():\n return list(_method_adaptors.keys())\n",
"step-4": "_method_adaptors = dict()\n\n\ndef register_dist_adaptor(method_name):\n\n def decorator(func):\n _method_adaptors[method_name] = func\n\n def wrapper(*args, **kwargs):\n func(*args, **kwargs)\n return wrapper\n return decorator\n\n\ndef get_nearest_method(method_name, parser):\n \"\"\"\n all candidates toked\n all protocol untoked\n input:\n queries:\n [\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n ]\n output:\n [\n nearest_idx1,\n nearest_idx2,\n nearest_idx3,\n ...\n ]\n \"\"\"\n return _method_adaptors[method_name](parser)\n\n\ndef get_method_names():\n return list(_method_adaptors.keys())\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""Form content type."""
from briefy.plone.content.interfaces import IBriefyContent
from plone.dexterity.content import Container
from zope.interface import implementer
class IForm(IBriefyContent):
"""Interface for a Composite Page."""
@implementer(IForm)
class Form(Container):
"""A Form."""
|
normal
|
{
"blob_id": "6e3de57f7c65e9f6195dabc3326b05744249cefe",
"index": 7991,
"step-1": "<mask token>\n\n\n@implementer(IForm)\nclass Form(Container):\n \"\"\"A Form.\"\"\"\n",
"step-2": "<mask token>\n\n\nclass IForm(IBriefyContent):\n <mask token>\n\n\n@implementer(IForm)\nclass Form(Container):\n \"\"\"A Form.\"\"\"\n",
"step-3": "<mask token>\n\n\nclass IForm(IBriefyContent):\n \"\"\"Interface for a Composite Page.\"\"\"\n\n\n@implementer(IForm)\nclass Form(Container):\n \"\"\"A Form.\"\"\"\n",
"step-4": "<mask token>\nfrom briefy.plone.content.interfaces import IBriefyContent\nfrom plone.dexterity.content import Container\nfrom zope.interface import implementer\n\n\nclass IForm(IBriefyContent):\n \"\"\"Interface for a Composite Page.\"\"\"\n\n\n@implementer(IForm)\nclass Form(Container):\n \"\"\"A Form.\"\"\"\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Form content type.\"\"\"\nfrom briefy.plone.content.interfaces import IBriefyContent\nfrom plone.dexterity.content import Container\nfrom zope.interface import implementer\n\n\nclass IForm(IBriefyContent):\n \"\"\"Interface for a Composite Page.\"\"\"\n\n\n@implementer(IForm)\nclass Form(Container):\n \"\"\"A Form.\"\"\"\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from os import getenv
LISTEN_IP = getenv('LISTEN_IP', '0.0.0.0')
LISTEN_PORT = int(getenv('LISTEN_PORT', 51273))
LISTEN_ADDRESS = LISTEN_IP, LISTEN_PORT
CONFIRMATION = getenv('CONFIRMATION')
if CONFIRMATION:
CONFIRMATION = CONFIRMATION.encode()
class UDPProtocol:
def __init__(self, consumer):
self.consumer = consumer
self.transport = None
def connection_made(self, transport):
self.transport = transport
def connection_lost(self, exc):
pass
def datagram_received(self, packet, address):
# WARNING: some kind of filtering should be there for the real app
self.consumer.consume_packet(packet)
if CONFIRMATION:
self.transport.sendto(CONFIRMATION, address)
def start(self):
loop = self.consumer.loop
coroutine = loop.create_datagram_endpoint(lambda: self, LISTEN_ADDRESS,
reuse_port=True)
loop.run_until_complete(coroutine)
def stop(self):
self.transport.close()
|
normal
|
{
"blob_id": "cca543f461724c3aac8fef23ef648883962bd706",
"index": 4607,
"step-1": "<mask token>\n\n\nclass UDPProtocol:\n <mask token>\n\n def connection_made(self, transport):\n self.transport = transport\n <mask token>\n <mask token>\n <mask token>\n\n def stop(self):\n self.transport.close()\n",
"step-2": "<mask token>\n\n\nclass UDPProtocol:\n\n def __init__(self, consumer):\n self.consumer = consumer\n self.transport = None\n\n def connection_made(self, transport):\n self.transport = transport\n\n def connection_lost(self, exc):\n pass\n <mask token>\n <mask token>\n\n def stop(self):\n self.transport.close()\n",
"step-3": "<mask token>\nif CONFIRMATION:\n CONFIRMATION = CONFIRMATION.encode()\n\n\nclass UDPProtocol:\n\n def __init__(self, consumer):\n self.consumer = consumer\n self.transport = None\n\n def connection_made(self, transport):\n self.transport = transport\n\n def connection_lost(self, exc):\n pass\n\n def datagram_received(self, packet, address):\n self.consumer.consume_packet(packet)\n if CONFIRMATION:\n self.transport.sendto(CONFIRMATION, address)\n\n def start(self):\n loop = self.consumer.loop\n coroutine = loop.create_datagram_endpoint(lambda : self,\n LISTEN_ADDRESS, reuse_port=True)\n loop.run_until_complete(coroutine)\n\n def stop(self):\n self.transport.close()\n",
"step-4": "<mask token>\nLISTEN_IP = getenv('LISTEN_IP', '0.0.0.0')\nLISTEN_PORT = int(getenv('LISTEN_PORT', 51273))\nLISTEN_ADDRESS = LISTEN_IP, LISTEN_PORT\nCONFIRMATION = getenv('CONFIRMATION')\nif CONFIRMATION:\n CONFIRMATION = CONFIRMATION.encode()\n\n\nclass UDPProtocol:\n\n def __init__(self, consumer):\n self.consumer = consumer\n self.transport = None\n\n def connection_made(self, transport):\n self.transport = transport\n\n def connection_lost(self, exc):\n pass\n\n def datagram_received(self, packet, address):\n self.consumer.consume_packet(packet)\n if CONFIRMATION:\n self.transport.sendto(CONFIRMATION, address)\n\n def start(self):\n loop = self.consumer.loop\n coroutine = loop.create_datagram_endpoint(lambda : self,\n LISTEN_ADDRESS, reuse_port=True)\n loop.run_until_complete(coroutine)\n\n def stop(self):\n self.transport.close()\n",
"step-5": "from os import getenv\n\n\nLISTEN_IP = getenv('LISTEN_IP', '0.0.0.0')\nLISTEN_PORT = int(getenv('LISTEN_PORT', 51273))\nLISTEN_ADDRESS = LISTEN_IP, LISTEN_PORT\n\nCONFIRMATION = getenv('CONFIRMATION')\nif CONFIRMATION:\n CONFIRMATION = CONFIRMATION.encode()\n\n\nclass UDPProtocol:\n\n def __init__(self, consumer):\n self.consumer = consumer\n self.transport = None\n\n def connection_made(self, transport):\n self.transport = transport\n\n def connection_lost(self, exc):\n pass\n\n def datagram_received(self, packet, address):\n # WARNING: some kind of filtering should be there for the real app\n self.consumer.consume_packet(packet)\n\n if CONFIRMATION:\n self.transport.sendto(CONFIRMATION, address)\n\n def start(self):\n loop = self.consumer.loop\n coroutine = loop.create_datagram_endpoint(lambda: self, LISTEN_ADDRESS,\n reuse_port=True)\n loop.run_until_complete(coroutine)\n\n def stop(self):\n self.transport.close()\n",
"step-ids": [
3,
5,
8,
9,
11
]
}
|
[
3,
5,
8,
9,
11
] |
<|reserved_special_token_0|>
def populateTimeInterval(rec):
out_ts = (rec['event_time'] - TEMP_TS) // DELTA_MINS * DELTA_MINS + TEMP_TS
rec['intvl_date'] = datetime.datetime.strftime(out_ts, '%Y-%m-%d')
rec['intvl_hhmm'] = datetime.datetime.strftime(out_ts, '%H%M')
return rec
def processBatch(data_frame, batchId):
if data_frame.count() > 0:
datasource0 = DynamicFrame.fromDF(data_frame, glueContext,
'from_data_frame').select_fields(['marketplace', 'event_time',
'views'])
datasource1 = Map.apply(frame=datasource0, f=populateTimeInterval)
path_datasink1 = s3path_data
datasink1 = glueContext.write_dynamic_frame.from_options(frame=
datasource1, connection_type='s3', connection_options={'path':
path_datasink1, 'partitionKeys': ['intvl_date', 'intvl_hhmm']},
format_options={'quoteChar': -1, 'timestamp.formats':
'yyyy-MM-dd HH:mm:ss'}, format=src_format, transformation_ctx=
'datasink1')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
job.init(args['JOB_NAME'], args)
<|reserved_special_token_0|>
def populateTimeInterval(rec):
out_ts = (rec['event_time'] - TEMP_TS) // DELTA_MINS * DELTA_MINS + TEMP_TS
rec['intvl_date'] = datetime.datetime.strftime(out_ts, '%Y-%m-%d')
rec['intvl_hhmm'] = datetime.datetime.strftime(out_ts, '%H%M')
return rec
def processBatch(data_frame, batchId):
if data_frame.count() > 0:
datasource0 = DynamicFrame.fromDF(data_frame, glueContext,
'from_data_frame').select_fields(['marketplace', 'event_time',
'views'])
datasource1 = Map.apply(frame=datasource0, f=populateTimeInterval)
path_datasink1 = s3path_data
datasink1 = glueContext.write_dynamic_frame.from_options(frame=
datasource1, connection_type='s3', connection_options={'path':
path_datasink1, 'partitionKeys': ['intvl_date', 'intvl_hhmm']},
format_options={'quoteChar': -1, 'timestamp.formats':
'yyyy-MM-dd HH:mm:ss'}, format=src_format, transformation_ctx=
'datasink1')
<|reserved_special_token_0|>
data_frame_datasource0.printSchema()
glueContext.forEachBatch(frame=data_frame_datasource0, batch_function=
processBatch, options={'windowSize': BATCH_WIN_SIZE,
'checkpointLocation': s3path_chkpt})
job.commit()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
args = getResolvedOptions(sys.argv, ['JOB_NAME', 'srcDBName',
'srcTableName', 'srcFormat', 'l4mBucket', 'l4mBucketPrefix', 'l4mInterval']
)
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
bucketname = args['l4mBucket']
bucketprefix = args['l4mBucketPrefix']
glue_dbname = args['srcDBName']
glue_tablename = args['srcTableName']
src_format = args['srcFormat']
l4m_interval = int(args['l4mInterval'])
s3path_data = 's3://' + bucketname + '/' + bucketprefix + '/data/'
s3path_chkpt = 's3://' + bucketname + '/' + bucketprefix + '/checkpoint/'
DELTA_MINS = datetime.timedelta(minutes=l4m_interval)
TEMP_TS = datetime.datetime.strptime('1970-01-01 00:00:00', '%Y-%m-%d %H:%M:%S'
)
BATCH_WIN_SIZE = str(l4m_interval) + ' minutes'
def populateTimeInterval(rec):
out_ts = (rec['event_time'] - TEMP_TS) // DELTA_MINS * DELTA_MINS + TEMP_TS
rec['intvl_date'] = datetime.datetime.strftime(out_ts, '%Y-%m-%d')
rec['intvl_hhmm'] = datetime.datetime.strftime(out_ts, '%H%M')
return rec
def processBatch(data_frame, batchId):
if data_frame.count() > 0:
datasource0 = DynamicFrame.fromDF(data_frame, glueContext,
'from_data_frame').select_fields(['marketplace', 'event_time',
'views'])
datasource1 = Map.apply(frame=datasource0, f=populateTimeInterval)
path_datasink1 = s3path_data
datasink1 = glueContext.write_dynamic_frame.from_options(frame=
datasource1, connection_type='s3', connection_options={'path':
path_datasink1, 'partitionKeys': ['intvl_date', 'intvl_hhmm']},
format_options={'quoteChar': -1, 'timestamp.formats':
'yyyy-MM-dd HH:mm:ss'}, format=src_format, transformation_ctx=
'datasink1')
data_frame_datasource0 = glueContext.create_data_frame.from_catalog(
stream_batch_time=BATCH_WIN_SIZE, database=glue_dbname, table_name=
glue_tablename, transformation_ctx='datasource0', additional_options={
'startingPosition': 'TRIM_HORIZON', 'inferSchema': 'false'})
data_frame_datasource0.printSchema()
glueContext.forEachBatch(frame=data_frame_datasource0, batch_function=
processBatch, options={'windowSize': BATCH_WIN_SIZE,
'checkpointLocation': s3path_chkpt})
job.commit()
<|reserved_special_token_1|>
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue import DynamicFrame
import datetime
args = getResolvedOptions(sys.argv, ['JOB_NAME', 'srcDBName',
'srcTableName', 'srcFormat', 'l4mBucket', 'l4mBucketPrefix', 'l4mInterval']
)
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
bucketname = args['l4mBucket']
bucketprefix = args['l4mBucketPrefix']
glue_dbname = args['srcDBName']
glue_tablename = args['srcTableName']
src_format = args['srcFormat']
l4m_interval = int(args['l4mInterval'])
s3path_data = 's3://' + bucketname + '/' + bucketprefix + '/data/'
s3path_chkpt = 's3://' + bucketname + '/' + bucketprefix + '/checkpoint/'
DELTA_MINS = datetime.timedelta(minutes=l4m_interval)
TEMP_TS = datetime.datetime.strptime('1970-01-01 00:00:00', '%Y-%m-%d %H:%M:%S'
)
BATCH_WIN_SIZE = str(l4m_interval) + ' minutes'
def populateTimeInterval(rec):
out_ts = (rec['event_time'] - TEMP_TS) // DELTA_MINS * DELTA_MINS + TEMP_TS
rec['intvl_date'] = datetime.datetime.strftime(out_ts, '%Y-%m-%d')
rec['intvl_hhmm'] = datetime.datetime.strftime(out_ts, '%H%M')
return rec
def processBatch(data_frame, batchId):
if data_frame.count() > 0:
datasource0 = DynamicFrame.fromDF(data_frame, glueContext,
'from_data_frame').select_fields(['marketplace', 'event_time',
'views'])
datasource1 = Map.apply(frame=datasource0, f=populateTimeInterval)
path_datasink1 = s3path_data
datasink1 = glueContext.write_dynamic_frame.from_options(frame=
datasource1, connection_type='s3', connection_options={'path':
path_datasink1, 'partitionKeys': ['intvl_date', 'intvl_hhmm']},
format_options={'quoteChar': -1, 'timestamp.formats':
'yyyy-MM-dd HH:mm:ss'}, format=src_format, transformation_ctx=
'datasink1')
data_frame_datasource0 = glueContext.create_data_frame.from_catalog(
stream_batch_time=BATCH_WIN_SIZE, database=glue_dbname, table_name=
glue_tablename, transformation_ctx='datasource0', additional_options={
'startingPosition': 'TRIM_HORIZON', 'inferSchema': 'false'})
data_frame_datasource0.printSchema()
glueContext.forEachBatch(frame=data_frame_datasource0, batch_function=
processBatch, options={'windowSize': BATCH_WIN_SIZE,
'checkpointLocation': s3path_chkpt})
job.commit()
<|reserved_special_token_1|>
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# This code is sample only. Not for use in production.
#
# Author: Babu Srinivasan
# Contact: [email protected], [email protected]
#
# Spark Streaming ETL script
# Input:
# 1/ Kinesis Data Stream source (via AWS Glue Table)
# 2/ Time interval (for Amazon Lookout for Metrics)
# Output:
# 1/ Streaming data (selected columns only) organized by time interval
# Processing:
# 1/ Micro-batch streaming data by time interval
# 2/ Select user specified columns (dimensions & measures) and event_timestamp
# 3/ Output data to S3 sink (organized using S3 prefixes that contains timestamp)
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue import DynamicFrame
import datetime
args = getResolvedOptions(sys.argv, [
"JOB_NAME",
"srcDBName",
"srcTableName",
"srcFormat",
"l4mBucket",
"l4mBucketPrefix",
"l4mInterval"])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
# Glue Job parameters - specified in cdk.json during stack deployment
bucketname = args["l4mBucket"]
bucketprefix = args["l4mBucketPrefix"]
glue_dbname = args["srcDBName"]
glue_tablename = args["srcTableName"]
src_format = args["srcFormat"]
l4m_interval = int(args["l4mInterval"]) # in minutes
s3path_data = "s3://" + bucketname + "/" + bucketprefix + "/data/"
s3path_chkpt = "s3://" + bucketname + "/" + bucketprefix + "/checkpoint/"
DELTA_MINS = datetime.timedelta(minutes=l4m_interval)
TEMP_TS = datetime.datetime.strptime("1970-01-01 00:00:00", "%Y-%m-%d %H:%M:%S")
BATCH_WIN_SIZE = str(l4m_interval) + " minutes"
# Function to populate time interval based on Event Timestamp.
# This is equivalent to built-in STEP() function in Kinesis Data Analytics SQL application
def populateTimeInterval(rec):
out_ts = (((rec['event_time'] - TEMP_TS) // DELTA_MINS) * DELTA_MINS) + TEMP_TS
rec['intvl_date'] = datetime.datetime.strftime(out_ts, "%Y-%m-%d")
rec['intvl_hhmm'] = datetime.datetime.strftime(out_ts, "%H%M")
return rec
# Main processing logic - called from main for each micro-batch of window size determined by time interval
def processBatch(data_frame, batchId):
if (data_frame.count() > 0):
# Convert Data frame to Glue Dynamic Frame and select only dimensions & measures that will be used by Anomaly detection
datasource0 = DynamicFrame.fromDF(data_frame, glueContext, "from_data_frame").select_fields(['marketplace','event_time', 'views'])
# Populate time interval (yyyy-mm-dd & HHMM)
datasource1 = Map.apply(frame=datasource0, f=populateTimeInterval)
# datasource1.printSchema()
# Write the dynamic frame to S3 sink with prefix constructed from time interval
path_datasink1 = s3path_data
datasink1 = glueContext.write_dynamic_frame.from_options(frame = datasource1, connection_type = "s3", \
connection_options = {"path": path_datasink1, "partitionKeys": ["intvl_date", "intvl_hhmm"]}, \
format_options={"quoteChar": -1, "timestamp.formats": "yyyy-MM-dd HH:mm:ss"}, \
format = src_format, transformation_ctx = "datasink1")
#### Main
data_frame_datasource0 = glueContext.create_data_frame.from_catalog(stream_batch_time = BATCH_WIN_SIZE, \
database = glue_dbname, table_name = glue_tablename, transformation_ctx = "datasource0", \
additional_options = {"startingPosition": "TRIM_HORIZON", "inferSchema": "false"})
data_frame_datasource0.printSchema()
glueContext.forEachBatch(frame = data_frame_datasource0, batch_function = processBatch, \
options = {"windowSize": BATCH_WIN_SIZE, "checkpointLocation": s3path_chkpt})
job.commit()
|
flexible
|
{
"blob_id": "fcccbc8d582b709aa27500ef28d86103e98eee4c",
"index": 7980,
"step-1": "<mask token>\n\n\ndef populateTimeInterval(rec):\n out_ts = (rec['event_time'] - TEMP_TS) // DELTA_MINS * DELTA_MINS + TEMP_TS\n rec['intvl_date'] = datetime.datetime.strftime(out_ts, '%Y-%m-%d')\n rec['intvl_hhmm'] = datetime.datetime.strftime(out_ts, '%H%M')\n return rec\n\n\ndef processBatch(data_frame, batchId):\n if data_frame.count() > 0:\n datasource0 = DynamicFrame.fromDF(data_frame, glueContext,\n 'from_data_frame').select_fields(['marketplace', 'event_time',\n 'views'])\n datasource1 = Map.apply(frame=datasource0, f=populateTimeInterval)\n path_datasink1 = s3path_data\n datasink1 = glueContext.write_dynamic_frame.from_options(frame=\n datasource1, connection_type='s3', connection_options={'path':\n path_datasink1, 'partitionKeys': ['intvl_date', 'intvl_hhmm']},\n format_options={'quoteChar': -1, 'timestamp.formats':\n 'yyyy-MM-dd HH:mm:ss'}, format=src_format, transformation_ctx=\n 'datasink1')\n\n\n<mask token>\n",
"step-2": "<mask token>\njob.init(args['JOB_NAME'], args)\n<mask token>\n\n\ndef populateTimeInterval(rec):\n out_ts = (rec['event_time'] - TEMP_TS) // DELTA_MINS * DELTA_MINS + TEMP_TS\n rec['intvl_date'] = datetime.datetime.strftime(out_ts, '%Y-%m-%d')\n rec['intvl_hhmm'] = datetime.datetime.strftime(out_ts, '%H%M')\n return rec\n\n\ndef processBatch(data_frame, batchId):\n if data_frame.count() > 0:\n datasource0 = DynamicFrame.fromDF(data_frame, glueContext,\n 'from_data_frame').select_fields(['marketplace', 'event_time',\n 'views'])\n datasource1 = Map.apply(frame=datasource0, f=populateTimeInterval)\n path_datasink1 = s3path_data\n datasink1 = glueContext.write_dynamic_frame.from_options(frame=\n datasource1, connection_type='s3', connection_options={'path':\n path_datasink1, 'partitionKeys': ['intvl_date', 'intvl_hhmm']},\n format_options={'quoteChar': -1, 'timestamp.formats':\n 'yyyy-MM-dd HH:mm:ss'}, format=src_format, transformation_ctx=\n 'datasink1')\n\n\n<mask token>\ndata_frame_datasource0.printSchema()\nglueContext.forEachBatch(frame=data_frame_datasource0, batch_function=\n processBatch, options={'windowSize': BATCH_WIN_SIZE,\n 'checkpointLocation': s3path_chkpt})\njob.commit()\n",
"step-3": "<mask token>\nargs = getResolvedOptions(sys.argv, ['JOB_NAME', 'srcDBName',\n 'srcTableName', 'srcFormat', 'l4mBucket', 'l4mBucketPrefix', 'l4mInterval']\n )\nsc = SparkContext()\nglueContext = GlueContext(sc)\nspark = glueContext.spark_session\njob = Job(glueContext)\njob.init(args['JOB_NAME'], args)\nbucketname = args['l4mBucket']\nbucketprefix = args['l4mBucketPrefix']\nglue_dbname = args['srcDBName']\nglue_tablename = args['srcTableName']\nsrc_format = args['srcFormat']\nl4m_interval = int(args['l4mInterval'])\ns3path_data = 's3://' + bucketname + '/' + bucketprefix + '/data/'\ns3path_chkpt = 's3://' + bucketname + '/' + bucketprefix + '/checkpoint/'\nDELTA_MINS = datetime.timedelta(minutes=l4m_interval)\nTEMP_TS = datetime.datetime.strptime('1970-01-01 00:00:00', '%Y-%m-%d %H:%M:%S'\n )\nBATCH_WIN_SIZE = str(l4m_interval) + ' minutes'\n\n\ndef populateTimeInterval(rec):\n out_ts = (rec['event_time'] - TEMP_TS) // DELTA_MINS * DELTA_MINS + TEMP_TS\n rec['intvl_date'] = datetime.datetime.strftime(out_ts, '%Y-%m-%d')\n rec['intvl_hhmm'] = datetime.datetime.strftime(out_ts, '%H%M')\n return rec\n\n\ndef processBatch(data_frame, batchId):\n if data_frame.count() > 0:\n datasource0 = DynamicFrame.fromDF(data_frame, glueContext,\n 'from_data_frame').select_fields(['marketplace', 'event_time',\n 'views'])\n datasource1 = Map.apply(frame=datasource0, f=populateTimeInterval)\n path_datasink1 = s3path_data\n datasink1 = glueContext.write_dynamic_frame.from_options(frame=\n datasource1, connection_type='s3', connection_options={'path':\n path_datasink1, 'partitionKeys': ['intvl_date', 'intvl_hhmm']},\n format_options={'quoteChar': -1, 'timestamp.formats':\n 'yyyy-MM-dd HH:mm:ss'}, format=src_format, transformation_ctx=\n 'datasink1')\n\n\ndata_frame_datasource0 = glueContext.create_data_frame.from_catalog(\n stream_batch_time=BATCH_WIN_SIZE, database=glue_dbname, table_name=\n glue_tablename, transformation_ctx='datasource0', additional_options={\n 'startingPosition': 'TRIM_HORIZON', 'inferSchema': 'false'})\ndata_frame_datasource0.printSchema()\nglueContext.forEachBatch(frame=data_frame_datasource0, batch_function=\n processBatch, options={'windowSize': BATCH_WIN_SIZE,\n 'checkpointLocation': s3path_chkpt})\njob.commit()\n",
"step-4": "import sys\nfrom awsglue.transforms import *\nfrom awsglue.utils import getResolvedOptions\nfrom pyspark.context import SparkContext\nfrom awsglue.context import GlueContext\nfrom awsglue.job import Job\nfrom awsglue import DynamicFrame\nimport datetime\nargs = getResolvedOptions(sys.argv, ['JOB_NAME', 'srcDBName',\n 'srcTableName', 'srcFormat', 'l4mBucket', 'l4mBucketPrefix', 'l4mInterval']\n )\nsc = SparkContext()\nglueContext = GlueContext(sc)\nspark = glueContext.spark_session\njob = Job(glueContext)\njob.init(args['JOB_NAME'], args)\nbucketname = args['l4mBucket']\nbucketprefix = args['l4mBucketPrefix']\nglue_dbname = args['srcDBName']\nglue_tablename = args['srcTableName']\nsrc_format = args['srcFormat']\nl4m_interval = int(args['l4mInterval'])\ns3path_data = 's3://' + bucketname + '/' + bucketprefix + '/data/'\ns3path_chkpt = 's3://' + bucketname + '/' + bucketprefix + '/checkpoint/'\nDELTA_MINS = datetime.timedelta(minutes=l4m_interval)\nTEMP_TS = datetime.datetime.strptime('1970-01-01 00:00:00', '%Y-%m-%d %H:%M:%S'\n )\nBATCH_WIN_SIZE = str(l4m_interval) + ' minutes'\n\n\ndef populateTimeInterval(rec):\n out_ts = (rec['event_time'] - TEMP_TS) // DELTA_MINS * DELTA_MINS + TEMP_TS\n rec['intvl_date'] = datetime.datetime.strftime(out_ts, '%Y-%m-%d')\n rec['intvl_hhmm'] = datetime.datetime.strftime(out_ts, '%H%M')\n return rec\n\n\ndef processBatch(data_frame, batchId):\n if data_frame.count() > 0:\n datasource0 = DynamicFrame.fromDF(data_frame, glueContext,\n 'from_data_frame').select_fields(['marketplace', 'event_time',\n 'views'])\n datasource1 = Map.apply(frame=datasource0, f=populateTimeInterval)\n path_datasink1 = s3path_data\n datasink1 = glueContext.write_dynamic_frame.from_options(frame=\n datasource1, connection_type='s3', connection_options={'path':\n path_datasink1, 'partitionKeys': ['intvl_date', 'intvl_hhmm']},\n format_options={'quoteChar': -1, 'timestamp.formats':\n 'yyyy-MM-dd HH:mm:ss'}, format=src_format, transformation_ctx=\n 'datasink1')\n\n\ndata_frame_datasource0 = glueContext.create_data_frame.from_catalog(\n stream_batch_time=BATCH_WIN_SIZE, database=glue_dbname, table_name=\n glue_tablename, transformation_ctx='datasource0', additional_options={\n 'startingPosition': 'TRIM_HORIZON', 'inferSchema': 'false'})\ndata_frame_datasource0.printSchema()\nglueContext.forEachBatch(frame=data_frame_datasource0, batch_function=\n processBatch, options={'windowSize': BATCH_WIN_SIZE,\n 'checkpointLocation': s3path_chkpt})\njob.commit()\n",
"step-5": "# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: MIT-0\n#\n# This code is sample only. Not for use in production.\n#\n# Author: Babu Srinivasan\n# Contact: [email protected], [email protected]\n#\n# Spark Streaming ETL script \n# Input: \n# 1/ Kinesis Data Stream source (via AWS Glue Table)\n# 2/ Time interval (for Amazon Lookout for Metrics)\n# Output: \n# 1/ Streaming data (selected columns only) organized by time interval \n# Processing:\n# 1/ Micro-batch streaming data by time interval\n# 2/ Select user specified columns (dimensions & measures) and event_timestamp \n# 3/ Output data to S3 sink (organized using S3 prefixes that contains timestamp) \n\nimport sys\nfrom awsglue.transforms import *\nfrom awsglue.utils import getResolvedOptions\nfrom pyspark.context import SparkContext\nfrom awsglue.context import GlueContext\nfrom awsglue.job import Job\nfrom awsglue import DynamicFrame\nimport datetime\n\nargs = getResolvedOptions(sys.argv, [\n \"JOB_NAME\", \n \"srcDBName\",\n \"srcTableName\",\n \"srcFormat\",\n \"l4mBucket\",\n \"l4mBucketPrefix\",\n \"l4mInterval\"])\n\nsc = SparkContext()\nglueContext = GlueContext(sc)\nspark = glueContext.spark_session\njob = Job(glueContext)\njob.init(args['JOB_NAME'], args)\n\n# Glue Job parameters - specified in cdk.json during stack deployment\nbucketname = args[\"l4mBucket\"]\nbucketprefix = args[\"l4mBucketPrefix\"]\nglue_dbname = args[\"srcDBName\"]\nglue_tablename = args[\"srcTableName\"]\nsrc_format = args[\"srcFormat\"]\nl4m_interval = int(args[\"l4mInterval\"]) # in minutes\n\ns3path_data = \"s3://\" + bucketname + \"/\" + bucketprefix + \"/data/\"\ns3path_chkpt = \"s3://\" + bucketname + \"/\" + bucketprefix + \"/checkpoint/\"\nDELTA_MINS = datetime.timedelta(minutes=l4m_interval)\nTEMP_TS = datetime.datetime.strptime(\"1970-01-01 00:00:00\", \"%Y-%m-%d %H:%M:%S\")\nBATCH_WIN_SIZE = str(l4m_interval) + \" minutes\"\n \n \n# Function to populate time interval based on Event Timestamp. 
\n# This is equivalent to built-in STEP() function in Kinesis Data Analytics SQL application\ndef populateTimeInterval(rec):\n out_ts = (((rec['event_time'] - TEMP_TS) // DELTA_MINS) * DELTA_MINS) + TEMP_TS\n \n rec['intvl_date'] = datetime.datetime.strftime(out_ts, \"%Y-%m-%d\") \n rec['intvl_hhmm'] = datetime.datetime.strftime(out_ts, \"%H%M\")\n return rec\n\n# Main processing logic - called from main for each micro-batch of window size determined by time interval \ndef processBatch(data_frame, batchId): \n if (data_frame.count() > 0):\n # Convert Data frame to Glue Dynamic Frame and select only dimensions & measures that will be used by Anomaly detection\n datasource0 = DynamicFrame.fromDF(data_frame, glueContext, \"from_data_frame\").select_fields(['marketplace','event_time', 'views'])\n # Populate time interval (yyyy-mm-dd & HHMM) \n datasource1 = Map.apply(frame=datasource0, f=populateTimeInterval)\n \n # datasource1.printSchema()\n\n # Write the dynamic frame to S3 sink with prefix constructed from time interval\n path_datasink1 = s3path_data \n datasink1 = glueContext.write_dynamic_frame.from_options(frame = datasource1, connection_type = \"s3\", \\\n connection_options = {\"path\": path_datasink1, \"partitionKeys\": [\"intvl_date\", \"intvl_hhmm\"]}, \\\n format_options={\"quoteChar\": -1, \"timestamp.formats\": \"yyyy-MM-dd HH:mm:ss\"}, \\\n format = src_format, transformation_ctx = \"datasink1\")\n\n#### Main\ndata_frame_datasource0 = glueContext.create_data_frame.from_catalog(stream_batch_time = BATCH_WIN_SIZE, \\\n database = glue_dbname, table_name = glue_tablename, transformation_ctx = \"datasource0\", \\\n additional_options = {\"startingPosition\": \"TRIM_HORIZON\", \"inferSchema\": \"false\"})\ndata_frame_datasource0.printSchema()\nglueContext.forEachBatch(frame = data_frame_datasource0, batch_function = processBatch, \\\n options = {\"windowSize\": BATCH_WIN_SIZE, \"checkpointLocation\": s3path_chkpt})\njob.commit()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def __handle_import():
import sys
import os
cur_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_path = os.path.join(cur_path, '../../build/lib/')
sys.path.append(lib_path)
proto_path = os.path.join(cur_path, '../../build/protobuf_python/')
sys.path.append(proto_path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def __handle_import():
import sys
import os
cur_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_path = os.path.join(cur_path, '../../build/lib/')
sys.path.append(lib_path)
proto_path = os.path.join(cur_path, '../../build/protobuf_python/')
sys.path.append(proto_path)
__handle_import()
|
flexible
|
{
"blob_id": "24595979199199ecc6bc6f3a26e0db418def8b78",
"index": 9675,
"step-1": "<mask token>\n",
"step-2": "def __handle_import():\n import sys\n import os\n cur_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n lib_path = os.path.join(cur_path, '../../build/lib/')\n sys.path.append(lib_path)\n proto_path = os.path.join(cur_path, '../../build/protobuf_python/')\n sys.path.append(proto_path)\n\n\n<mask token>\n",
"step-3": "def __handle_import():\n import sys\n import os\n cur_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n lib_path = os.path.join(cur_path, '../../build/lib/')\n sys.path.append(lib_path)\n proto_path = os.path.join(cur_path, '../../build/protobuf_python/')\n sys.path.append(proto_path)\n\n\n__handle_import()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
"""
Author: Adam White, Matthew Schlegel, Mohammad M. Ajallooeian, Sina Ghiassian
Purpose: Skeleton code for Monte Carlo Exploring Starts Control Agent
for use on A3 of Reinforcement learning course University of Alberta Fall 2017
"""
"""
/*
* Copyright (c) HAOTIAN ZHU ,COMPUT301,University Of Alberta All Rights Reserved.
* You May Use, Distribute Or Modify This Code Under Term And
* Condition Of Code Of Students Behavior At University Of Alberta.
*
*
* Author: Haotian Zhu
* If You Have Any Question Please contact [email protected].
*
*/
"""
import numpy as np
import pickle
from importlib import import_module
tile = import_module("tiles3")
iht = tile.IHT(3000)
w = None
currentState = None
lastState = None
alpha = 0.01/50
gamma = 1.0
x = None
def agent_init():
global w,currentState,lastState,x
w = np.zeros(1200)
currentState = np.zeros(1)
lastState = np.zeros(1)
return
def agent_start(state):
global w,currentState,lastState,x
currentState[0] = float(state[0]/200.0)
lastState[0] = currentState[0]
action = chooseAction(state[0])
return action
def agent_step(reward, state):
global w,currentState,lastState,x
state1 = np.zeros(1200)
state2 = np.zeros(1200)
currentState[0] = float(state[0]/200.0)
currentx = tile.tiles(iht,50,currentState)
lastx = tile.tiles(iht,50,lastState)
for index in currentx:
state1[index] = 1
for index in lastx:
state2[index] = 1
w = w + alpha*(reward+gamma*np.dot(w,state1) - np.dot(w,state2))*state2
lastState[0] = currentState[0]
action = chooseAction(state[0])
return action
def agent_end(reward):
global w,currentState,lastState,x
state2 = np.zeros(1200)
lastx = tile.tiles(iht,50,lastState)
for index in lastx:
state2[index] = 1
w = w + alpha*(reward- np.dot(w,state2))*state2
return
def agent_cleanup():
"""
This function is not used
"""
# clean up
return
def agent_message(in_message): # returns string, in_message: string
global w
"""
Arguments: in_message: string
returns: The value function as a string.
This function is complete. You do not need to add code here.
"""
# should not need to modify this function. Modify at your own risk
if (in_message == 'ValueFunction'):
out = np.zeros(1000)
for i in range(1000):
x = tile.tiles(iht,50,[float(i/200.0)])
state = np.zeros(1200)
for index in x:
state[index] = 1
out[i] = np.dot(w,state)
return out
else:
return "I don't know what to return!!"
def chooseAction(state):
if np.random.randint(2) : #1
result = np.random.randint(100)+1
if result+state>=1000:
return 1000-state
else:
return result
else:
result = (np.random.randint(100)+1)*(-1)
if result+state<=0:
return state*(-1)
else:
return result
|
normal
|
{
"blob_id": "4e02edcf8a512060fa92ede11f33993978584147",
"index": 1997,
"step-1": "\n\n\n\n#!/usr/bin/env python\n\n\"\"\"\n Author: Adam White, Matthew Schlegel, Mohammad M. Ajallooeian, Sina Ghiassian\n Purpose: Skeleton code for Monte Carlo Exploring Starts Control Agent\n\t\t for use on A3 of Reinforcement learning course University of Alberta Fall 2017\n \n\"\"\"\n\"\"\"\n/*\n * Copyright (c) HAOTIAN ZHU ,COMPUT301,University Of Alberta All Rights Reserved.\n * You May Use, Distribute Or Modify This Code Under Term And \n * Condition Of Code Of Students Behavior At University Of Alberta.\n *\n *\n * Author: Haotian Zhu\n * If You Have Any Question Please contact [email protected].\n * \n */\n\"\"\"\n\nimport numpy as np\nimport pickle\n\n\nfrom importlib import import_module\n\n\n\n\ntile = import_module(\"tiles3\")\niht = tile.IHT(3000)\n\n\n\nw = None\ncurrentState = None\nlastState = None\nalpha = 0.01/50\ngamma = 1.0\nx = None\n\n\ndef agent_init():\n\tglobal w,currentState,lastState,x\n\n\tw = np.zeros(1200)\n\tcurrentState = np.zeros(1)\n\tlastState = np.zeros(1)\n\n\n\treturn\n\ndef agent_start(state):\n\tglobal w,currentState,lastState,x\n\n\tcurrentState[0] = float(state[0]/200.0)\n\tlastState[0] = currentState[0]\n\taction = chooseAction(state[0])\n\n\n\n\n\treturn action\n\n\ndef agent_step(reward, state): \n\tglobal w,currentState,lastState,x\n\n\tstate1 = np.zeros(1200)\n\tstate2 = np.zeros(1200)\n\n\tcurrentState[0] = float(state[0]/200.0)\n\tcurrentx = tile.tiles(iht,50,currentState)\n\tlastx = tile.tiles(iht,50,lastState)\n\n\n\tfor index in currentx:\n\t\tstate1[index] = 1\n\tfor index in lastx:\n\t\tstate2[index] = 1\n\n\n\n\tw = w + alpha*(reward+gamma*np.dot(w,state1) - np.dot(w,state2))*state2\n\tlastState[0] = currentState[0]\n\taction = chooseAction(state[0])\n\n\treturn action\n\ndef agent_end(reward):\n\tglobal w,currentState,lastState,x\n\n\n\tstate2 = np.zeros(1200)\n\n\tlastx = tile.tiles(iht,50,lastState)\n\n\tfor index in lastx:\n\t\tstate2[index] = 1\n\n\n\tw = w + alpha*(reward- np.dot(w,state2))*state2\n\n\n\n\treturn\n\ndef agent_cleanup():\n\t\"\"\"\n\tThis function is not used\n\t\"\"\"\n\t# clean up\n\n\treturn\n\ndef agent_message(in_message): # returns string, in_message: string\n \tglobal w\n\t\"\"\"\n\tArguments: in_message: string\n\treturns: The value function as a string.\n\tThis function is complete. You do not need to add code here.\n\t\"\"\"\n\t# should not need to modify this function. Modify at your own risk\n\tif (in_message == 'ValueFunction'):\n\t\tout = np.zeros(1000)\n\t\tfor i in range(1000):\n\t\t\tx = tile.tiles(iht,50,[float(i/200.0)])\n\t\t\tstate = np.zeros(1200)\n\t\t\tfor index in x:\n\t\t\t\tstate[index] = 1\n\n\t\t\tout[i] = np.dot(w,state)\n\t\treturn out\n\telse:\n\t\treturn \"I don't know what to return!!\"\n\n\n\n\n\ndef chooseAction(state):\n\tif np.random.randint(2) : #1\n\t\tresult = np.random.randint(100)+1\n\t\tif result+state>=1000:\n\t\t\treturn 1000-state\n\t\telse:\n\t\t\treturn result\n\n\telse:\n\t\tresult = (np.random.randint(100)+1)*(-1)\n\t\tif result+state<=0:\n\t\t\treturn state*(-1)\n\t\telse:\n\t\t\treturn result \n\n\n\n\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django import forms
from myapp.models import Student
from myapp.models import Employee
class EmpForm(forms.ModelForm):
class Meta:
model = Student
fields = "__all__"
class StudentForm(forms.Form):
firstname = forms.CharField(label="Enter first name:", max_length=50)
lastname = forms.CharField(label="Enter last name:", max_length=100)
email=forms.EmailField(label="Enter Email")
file=forms.FileField()
class EmployeeForm(forms.ModelForm):
class Meta:
model = Employee
fields = "__all__"
|
normal
|
{
"blob_id": "0b141ecca501c21df50e76d0841dd5651274f0da",
"index": 8509,
"step-1": "<mask token>\n\n\nclass StudentForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass EmployeeForm(forms.ModelForm):\n\n\n class Meta:\n model = Employee\n fields = '__all__'\n",
"step-2": "<mask token>\n\n\nclass StudentForm(forms.Form):\n firstname = forms.CharField(label='Enter first name:', max_length=50)\n lastname = forms.CharField(label='Enter last name:', max_length=100)\n email = forms.EmailField(label='Enter Email')\n file = forms.FileField()\n\n\nclass EmployeeForm(forms.ModelForm):\n\n\n class Meta:\n model = Employee\n fields = '__all__'\n",
"step-3": "<mask token>\n\n\nclass EmpForm(forms.ModelForm):\n\n\n class Meta:\n model = Student\n fields = '__all__'\n\n\nclass StudentForm(forms.Form):\n firstname = forms.CharField(label='Enter first name:', max_length=50)\n lastname = forms.CharField(label='Enter last name:', max_length=100)\n email = forms.EmailField(label='Enter Email')\n file = forms.FileField()\n\n\nclass EmployeeForm(forms.ModelForm):\n\n\n class Meta:\n model = Employee\n fields = '__all__'\n",
"step-4": "from django import forms\nfrom myapp.models import Student\nfrom myapp.models import Employee\n\n\nclass EmpForm(forms.ModelForm):\n\n\n class Meta:\n model = Student\n fields = '__all__'\n\n\nclass StudentForm(forms.Form):\n firstname = forms.CharField(label='Enter first name:', max_length=50)\n lastname = forms.CharField(label='Enter last name:', max_length=100)\n email = forms.EmailField(label='Enter Email')\n file = forms.FileField()\n\n\nclass EmployeeForm(forms.ModelForm):\n\n\n class Meta:\n model = Employee\n fields = '__all__'\n",
"step-5": "from django import forms\nfrom myapp.models import Student\nfrom myapp.models import Employee\n\n\nclass EmpForm(forms.ModelForm):\n class Meta:\n model = Student\n fields = \"__all__\"\n\n\nclass StudentForm(forms.Form):\n firstname = forms.CharField(label=\"Enter first name:\", max_length=50)\n lastname = forms.CharField(label=\"Enter last name:\", max_length=100)\n email=forms.EmailField(label=\"Enter Email\")\n file=forms.FileField()\n\nclass EmployeeForm(forms.ModelForm):\n class Meta:\n model = Employee\n fields = \"__all__\"\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
word=input()
letter,digit=0,0
for i in word:
if('a'<=i and i<='z') or ('A'<=i and i<='Z'):
letter+=1
if '0'<=i and i<='9':
digit+=1
print("LETTERS {0} \n DIGITS {1}".format(letter,digit))
|
normal
|
{
"blob_id": "f2a508ae99697d6ba320b158a1000379b975d568",
"index": 2227,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in word:\n if 'a' <= i and i <= 'z' or 'A' <= i and i <= 'Z':\n letter += 1\n if '0' <= i and i <= '9':\n digit += 1\nprint(\"\"\"LETTERS {0} \n DIGITS {1}\"\"\".format(letter, digit))\n",
"step-3": "word = input()\nletter, digit = 0, 0\nfor i in word:\n if 'a' <= i and i <= 'z' or 'A' <= i and i <= 'Z':\n letter += 1\n if '0' <= i and i <= '9':\n digit += 1\nprint(\"\"\"LETTERS {0} \n DIGITS {1}\"\"\".format(letter, digit))\n",
"step-4": "word=input()\nletter,digit=0,0\n\nfor i in word:\n if('a'<=i and i<='z') or ('A'<=i and i<='Z'):\n letter+=1\n if '0'<=i and i<='9':\n digit+=1\n\nprint(\"LETTERS {0} \\n DIGITS {1}\".format(letter,digit))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# This file is part of the functional_calculator_oop.py Task
# Create a class called Calculator
class Calculator:
def Add(self, num1, num2):
return num1 + num2
def Subtract(self, num1, num2):
return num1 - num2
def Multiply(self, num1, num2):
return num1 * num2
def Divide(self, num1, num2):
return num1 / num2
# We need this conditional check so that the code doesn't run automatically when we import it on another file
if __name__ == "__main__":
# Create calculator object
calc = Calculator()
# Use object to call methods
print(calc.Add(1, 2))
print(calc.Subtract(1, 2))
print(calc.Multiply(1, 2))
print(calc.Divide(1, 2))
# Here we can see that __name__ is main when ran from here directly, but calculator_oop when imported on another file
# print(__name__)
|
normal
|
{
"blob_id": "d2972fb7cff08e15957f9baeaa6fd9a6f5bbb006",
"index": 1127,
"step-1": "class Calculator:\n <mask token>\n\n def Subtract(self, num1, num2):\n return num1 - num2\n <mask token>\n\n def Divide(self, num1, num2):\n return num1 / num2\n\n\n<mask token>\n",
"step-2": "class Calculator:\n\n def Add(self, num1, num2):\n return num1 + num2\n\n def Subtract(self, num1, num2):\n return num1 - num2\n <mask token>\n\n def Divide(self, num1, num2):\n return num1 / num2\n\n\n<mask token>\n",
"step-3": "class Calculator:\n\n def Add(self, num1, num2):\n return num1 + num2\n\n def Subtract(self, num1, num2):\n return num1 - num2\n\n def Multiply(self, num1, num2):\n return num1 * num2\n\n def Divide(self, num1, num2):\n return num1 / num2\n\n\n<mask token>\n",
"step-4": "class Calculator:\n\n def Add(self, num1, num2):\n return num1 + num2\n\n def Subtract(self, num1, num2):\n return num1 - num2\n\n def Multiply(self, num1, num2):\n return num1 * num2\n\n def Divide(self, num1, num2):\n return num1 / num2\n\n\nif __name__ == '__main__':\n calc = Calculator()\n print(calc.Add(1, 2))\n print(calc.Subtract(1, 2))\n print(calc.Multiply(1, 2))\n print(calc.Divide(1, 2))\n",
"step-5": "# This file is part of the functional_calculator_oop.py Task\n# Create a class called Calculator\nclass Calculator:\n\n def Add(self, num1, num2):\n return num1 + num2\n\n def Subtract(self, num1, num2):\n return num1 - num2\n\n def Multiply(self, num1, num2):\n return num1 * num2\n\n def Divide(self, num1, num2):\n return num1 / num2\n\n\n# We need this conditional check so that the code doesn't run automatically when we import it on another file\nif __name__ == \"__main__\":\n # Create calculator object\n calc = Calculator()\n # Use object to call methods\n print(calc.Add(1, 2))\n print(calc.Subtract(1, 2))\n print(calc.Multiply(1, 2))\n print(calc.Divide(1, 2))\n\n# Here we can see that __name__ is main when ran from here directly, but calculator_oop when imported on another file\n# print(__name__)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from rest_framework import serializers
from plan.models import RoughRequirement, DetailedRequirement
from plan.models import OfferingCourse, FieldOfStudy, IndicatorFactor
from plan.models import BasisTemplate
class SimpleOfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = ['id', 'name']
extra_kwargs = {'name': {'required': False}}
class RoughRequirementSerializer(serializers.ModelSerializer):
class Meta:
model = RoughRequirement
fields = ['id', 'index', 'title', 'description']
class DetailedRequirementSerializer(serializers.ModelSerializer):
class Meta:
model = DetailedRequirement
fields = ['id', 'index', 'description', 'indicator_warning_line', 'rough_requirement']
class RequirementSerializer(serializers.ModelSerializer):
detailed_requirements = DetailedRequirementSerializer(many=True)
class Meta:
model = RoughRequirement
fields = ['id', 'index', 'title', 'description', 'detailed_requirements']
class OfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = '__all__'
class SimpleOfferingCourseSerializer(serializers.ModelSerializer):
class Meta:
model = OfferingCourse
fields = ["id", "name"]
class FieldOfStudySerializer(serializers.ModelSerializer):
class Meta:
model = FieldOfStudy
fields = '__all__'
class IndicatorFactorSerializer(serializers.ModelSerializer):
class Meta:
model = IndicatorFactor
fields = '__all__'
class BasisTemplateSerializer(serializers.ModelSerializer):
class Meta:
model = BasisTemplate
fields = '__all__'
class ReadIndicatorFactorSerializer(serializers.ModelSerializer):
offering_course = SimpleOfferingCourseSerializer()
field_of_study = FieldOfStudySerializer()
basis_templates = BasisTemplateSerializer(many=True)
rough_requirement = serializers.IntegerField(source='detailed_requirement.rough_requirement.id')
detailed_index = serializers.IntegerField(source='detailed_requirement.index')
rough_index = serializers.IntegerField(source='detailed_requirement.rough_requirement.index')
detailed_description = serializers.CharField(source='detailed_requirement.description')
rough_description = serializers.CharField(source='detailed_requirement.rough_requirement.description')
rough_title = serializers.CharField(source='detailed_requirement.rough_requirement.title')
class Meta:
model = IndicatorFactor
fields = '__all__'
|
normal
|
{
"blob_id": "596f7dfacc931f5e756c71b8622f4001df19934b",
"index": 5964,
"step-1": "<mask token>\n\n\nclass RequirementSerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description',\n 'detailed_requirements']\n\n\nclass OfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = '__all__'\n\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n\n\nclass FieldOfStudySerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = FieldOfStudy\n fields = '__all__'\n\n\nclass IndicatorFactorSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n\n\nclass BasisTemplateSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = BasisTemplate\n fields = '__all__'\n\n\nclass ReadIndicatorFactorSerializer(serializers.ModelSerializer):\n offering_course = SimpleOfferingCourseSerializer()\n field_of_study = FieldOfStudySerializer()\n basis_templates = BasisTemplateSerializer(many=True)\n rough_requirement = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.id')\n detailed_index = serializers.IntegerField(source=\n 'detailed_requirement.index')\n rough_index = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.index')\n detailed_description = serializers.CharField(source=\n 'detailed_requirement.description')\n rough_description = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.description')\n rough_title = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.title')\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n",
"step-2": "<mask token>\n\n\nclass RoughRequirementSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description']\n\n\nclass DetailedRequirementSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = DetailedRequirement\n fields = ['id', 'index', 'description', 'indicator_warning_line',\n 'rough_requirement']\n\n\nclass RequirementSerializer(serializers.ModelSerializer):\n detailed_requirements = DetailedRequirementSerializer(many=True)\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description',\n 'detailed_requirements']\n\n\nclass OfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = '__all__'\n\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n\n\nclass FieldOfStudySerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = FieldOfStudy\n fields = '__all__'\n\n\nclass IndicatorFactorSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n\n\nclass BasisTemplateSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = BasisTemplate\n fields = '__all__'\n\n\nclass ReadIndicatorFactorSerializer(serializers.ModelSerializer):\n offering_course = SimpleOfferingCourseSerializer()\n field_of_study = FieldOfStudySerializer()\n basis_templates = BasisTemplateSerializer(many=True)\n rough_requirement = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.id')\n detailed_index = serializers.IntegerField(source=\n 'detailed_requirement.index')\n rough_index = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.index')\n detailed_description = serializers.CharField(source=\n 'detailed_requirement.description')\n rough_description = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.description')\n rough_title = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.title')\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n",
"step-3": "<mask token>\n\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n extra_kwargs = {'name': {'required': False}}\n\n\nclass RoughRequirementSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description']\n\n\nclass DetailedRequirementSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = DetailedRequirement\n fields = ['id', 'index', 'description', 'indicator_warning_line',\n 'rough_requirement']\n\n\nclass RequirementSerializer(serializers.ModelSerializer):\n detailed_requirements = DetailedRequirementSerializer(many=True)\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description',\n 'detailed_requirements']\n\n\nclass OfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = '__all__'\n\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n\n\nclass FieldOfStudySerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = FieldOfStudy\n fields = '__all__'\n\n\nclass IndicatorFactorSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n\n\nclass BasisTemplateSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = BasisTemplate\n fields = '__all__'\n\n\nclass ReadIndicatorFactorSerializer(serializers.ModelSerializer):\n offering_course = SimpleOfferingCourseSerializer()\n field_of_study = FieldOfStudySerializer()\n basis_templates = BasisTemplateSerializer(many=True)\n rough_requirement = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.id')\n detailed_index = serializers.IntegerField(source=\n 'detailed_requirement.index')\n rough_index = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.index')\n detailed_description = serializers.CharField(source=\n 'detailed_requirement.description')\n rough_description = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.description')\n rough_title = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.title')\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n",
"step-4": "from rest_framework import serializers\nfrom plan.models import RoughRequirement, DetailedRequirement\nfrom plan.models import OfferingCourse, FieldOfStudy, IndicatorFactor\nfrom plan.models import BasisTemplate\n\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n extra_kwargs = {'name': {'required': False}}\n\n\nclass RoughRequirementSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description']\n\n\nclass DetailedRequirementSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = DetailedRequirement\n fields = ['id', 'index', 'description', 'indicator_warning_line',\n 'rough_requirement']\n\n\nclass RequirementSerializer(serializers.ModelSerializer):\n detailed_requirements = DetailedRequirementSerializer(many=True)\n\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description',\n 'detailed_requirements']\n\n\nclass OfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = '__all__'\n\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n\n\nclass FieldOfStudySerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = FieldOfStudy\n fields = '__all__'\n\n\nclass IndicatorFactorSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n\n\nclass BasisTemplateSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = BasisTemplate\n fields = '__all__'\n\n\nclass ReadIndicatorFactorSerializer(serializers.ModelSerializer):\n offering_course = SimpleOfferingCourseSerializer()\n field_of_study = FieldOfStudySerializer()\n basis_templates = BasisTemplateSerializer(many=True)\n rough_requirement = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.id')\n detailed_index = serializers.IntegerField(source=\n 'detailed_requirement.index')\n rough_index = serializers.IntegerField(source=\n 'detailed_requirement.rough_requirement.index')\n detailed_description = serializers.CharField(source=\n 'detailed_requirement.description')\n rough_description = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.description')\n rough_title = serializers.CharField(source=\n 'detailed_requirement.rough_requirement.title')\n\n\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n",
"step-5": "from rest_framework import serializers\nfrom plan.models import RoughRequirement, DetailedRequirement\nfrom plan.models import OfferingCourse, FieldOfStudy, IndicatorFactor\nfrom plan.models import BasisTemplate\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n class Meta:\n model = OfferingCourse\n fields = ['id', 'name']\n extra_kwargs = {'name': {'required': False}}\n\nclass RoughRequirementSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description']\n\nclass DetailedRequirementSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = DetailedRequirement\n fields = ['id', 'index', 'description', 'indicator_warning_line', 'rough_requirement']\n\nclass RequirementSerializer(serializers.ModelSerializer):\n detailed_requirements = DetailedRequirementSerializer(many=True)\n class Meta:\n model = RoughRequirement\n fields = ['id', 'index', 'title', 'description', 'detailed_requirements']\n\nclass OfferingCourseSerializer(serializers.ModelSerializer):\n class Meta:\n model = OfferingCourse\n fields = '__all__'\n\nclass SimpleOfferingCourseSerializer(serializers.ModelSerializer):\n class Meta:\n model = OfferingCourse\n fields = [\"id\", \"name\"]\n\nclass FieldOfStudySerializer(serializers.ModelSerializer):\n class Meta:\n model = FieldOfStudy\n fields = '__all__'\n\nclass IndicatorFactorSerializer(serializers.ModelSerializer):\n class Meta:\n model = IndicatorFactor\n fields = '__all__'\n\nclass BasisTemplateSerializer(serializers.ModelSerializer):\n class Meta:\n model = BasisTemplate\n fields = '__all__'\n\nclass ReadIndicatorFactorSerializer(serializers.ModelSerializer):\n offering_course = SimpleOfferingCourseSerializer()\n field_of_study = FieldOfStudySerializer()\n basis_templates = BasisTemplateSerializer(many=True)\n rough_requirement = serializers.IntegerField(source='detailed_requirement.rough_requirement.id')\n detailed_index = serializers.IntegerField(source='detailed_requirement.index')\n rough_index = serializers.IntegerField(source='detailed_requirement.rough_requirement.index')\n detailed_description = serializers.CharField(source='detailed_requirement.description')\n rough_description = serializers.CharField(source='detailed_requirement.rough_requirement.description')\n rough_title = serializers.CharField(source='detailed_requirement.rough_requirement.title')\n class Meta:\n model = IndicatorFactor\n fields = '__all__'",
"step-ids": [
8,
11,
12,
13,
14
]
}
|
[
8,
11,
12,
13,
14
] |
# -*- coding: utf-8 -*-
# https://github.com/Raschka-research-group/coral-cnn/tree/master/model-code/resnet34
from absl import flags, app
from Rank_consistent_model_fix import *
from Rank_consistent_model import *
from random import shuffle, random
import tensorflow as tf
import numpy as np
# import cv2
import os
import sys
import datetime
flags.DEFINE_string('img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')
flags.DEFINE_string('txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt', 'Text (with label information) directory')
flags.DEFINE_string('val_img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Validate image path')
flags.DEFINE_string('val_txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt', 'Validate text path')
flags.DEFINE_string("val_txt_path_2", "D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt", "Validataion text path")
flags.DEFINE_integer('img_size', 128, 'Image size')
flags.DEFINE_integer('ch', 3, 'Image channels')
flags.DEFINE_integer('batch_size', 256, 'Train Batch size')
flags.DEFINE_integer("val_batch_size", 128, "Validation Batch size")
flags.DEFINE_integer("val_batch_size_2", 128, "Validation2 batch size")
flags.DEFINE_integer('num_classes', 48, 'Number of classes')
flags.DEFINE_integer('epochs', 5000, 'Total epochs of training')
flags.DEFINE_float("lr", 5e-5, "Learning rate")
flags.DEFINE_string('weights', "/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5", '')
flags.DEFINE_bool('train', True, 'True or False')
flags.DEFINE_bool('pre_checkpoint', False, 'True or False')
flags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')
flags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')
flags.DEFINE_string("graphs", "", "")
flags.DEFINE_integer('n_test', 10000, 'Number of test images')
flags.DEFINE_string('test_txt', '', 'Test text(label) path')
flags.DEFINE_string('test_img', '', 'Test images path')
flags.DEFINE_string("output_loss_txt", "/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt", "")
FLAGS = flags.FLAGS
FLAGS(sys.argv)
optimizer = tf.keras.optimizers.Adam(FLAGS.lr, beta_1=0.9, beta_2=0.99)
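# Training-time input pipeline: decode a JPEG, resize to (img_size-8, img_size-8),
# scale to [0, 1], randomly flip horizontally, shift the age label so 16 maps to class 0,
# and return the image, its one-hot label and the integer rank.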
def _func(filename, label):
image_string = tf.io.read_file(filename)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.
#decode_image = tf.image.random_crop(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8, 3])
if random() > 0.5:
decode_image = tf.image.flip_left_right(decode_image)
#decode_image = tf.image.per_image_standardization(decode_image)
label = label - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot, label
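# Evaluation-time pipeline: same decoding/resizing without the random flip;
# returns the image and its one-hot label.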
def val_func(name, label):
image_string = tf.io.read_file(name)
decode_image = tf.image.decode_jpeg(image_string, channels=3)
decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.
#decode_image = tf.image.per_image_standardization(decode_image)
label = int(label) - 16
one_hot = tf.one_hot(label, FLAGS.num_classes)
return decode_image, one_hot
#@tf.function
def run_model(model, images):
logits, probs = model(images, training=True)
return logits, probs
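# CORAL-style training step: the K-1 logits are trained with an importance-weighted
# binary cross-entropy against the level vectors, written with log-sigmoid terms for
# numerical stability.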
@tf.function
def train_step(model, images, levels, imp):
with tf.GradientTape() as tape:
logits, probs = run_model(model, images)
#total_loss = (-tf.reduce_sum((tf.nn.log_softmax(logits, axis=2)[:,:,1]*levels + tf.nn.log_softmax(logits, axis=2)[:,:,0]*(1-levels))*imp, 1))
# total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + tf.math.log(1. - tf.nn.sigmoid(logits))*(1-levels))*imp, 1))
total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + (tf.math.log_sigmoid(logits) - logits)*(1-levels))*imp, 1))
#total_loss = -tf.reduce_sum((tf.math.log(tf.nn.softmax(logits, 2)[:, :, 1] + 1e-7) * levels \
# + tf.math.log(tf.nn.softmax(logits, 2)[:, :, 0] + 1e-7) * (1 - levels)) * imp, 1)
total_loss = tf.reduce_mean(total_loss)
gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return total_loss
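# Per-threshold importance weights derived from the label distribution: for each
# threshold t, take sqrt(max(#labels > t, #labels <= t)) and normalise by the maximum.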
def task_importance_weights(data):
label = np.array(data).astype(np.float32)
num_examples = label.size
y = np.unique(label)
m = np.zeros(label.shape)
for i, t in enumerate(np.arange(np.min(y), np.max(y))):
m_k = np.max([label[label > t].size,
num_examples - label[label > t].size])
#print(m_k)
m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)
m[i] = tf.sqrt(m_k)
# m[i] = float(m_k)**(0.5)
max_ = np.max(m)
imp = tf.cast(m / max_, tf.float32)
#print(imp)
return imp
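# Evaluation metric: predict the rank by counting how many of the K-1 cumulative
# probabilities exceed 0.5, then accumulate the absolute error against the ground truth.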
@tf.function
def test_MAE(model, images, labels):
logits, probs = model(images, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict, 1)
grd_age = tf.argmax(labels, 1) + 1
grd_age = tf.cast(grd_age, tf.float32)
AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))
return AE
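# Extended binary labels for ordinal regression: a length-(K-1) vector with 1s up to
# the true rank and 0s afterwards.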
def make_levels(labels):
levels = []
for i in range(FLAGS.batch_size):
l = [1] * (labels[i].numpy()) + [0]*(FLAGS.num_classes - 1 - labels[i].numpy())
l = tf.cast(l, tf.float32)
levels.append(l)
return tf.convert_to_tensor(levels, tf.float32)
def main(argv=None):
# train_model = resnet_type1(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, 3), NUM_CLASSES=FLAGS.num_classes)
train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, FLAGS.ch), include_top=False,
batch_size=FLAGS.batch_size, weight_path=FLAGS.weights, weights='imagenet')
regularizer = tf.keras.regularizers.l2(0.000005)
initializer = tf.keras.initializers.glorot_normal()
for layer in train_model.layers:
for attr in ['kernel_regularizer']:
if hasattr(layer, attr):
setattr(layer, attr, regularizer)
# for attr_ in ["kernel_initializer"]:
# if hasattr(layer, attr_):
# setattr(layer, attr_, initializer)
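    # CORAL-style head: global average pooling, a bias-free Dense(K-1) projection, then the
    # imported Linear layer; a sigmoid turns the K-1 logits into cumulative probabilities.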
x = train_model.output
avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)
# avgpool = tf.reshape(avgpool, [avgpool.shape[0], -1])
# fc = tf.keras.layers.Dense(1, use_bias=False)(avgpool)
# logits = Linear(NUM_CLASSES - 1)(fc)
logits = tf.keras.layers.Dense(FLAGS.num_classes-1, use_bias=False)(avgpool)
logits = Linear(FLAGS.num_classes - 1)(logits)
probs = tf.nn.sigmoid(logits)
train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits, probs])
train_model.summary()
#for m in train_model.layers:
# if isinstance(m, tf.keras.layers.Conv2D):
# a = m.output_mask
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, (2. / n)**.5)
# elif isinstance(m, tf.keras.layers.BatchNormalization):
# m.get_weights
# m.weight.data.fill_(1)
# m.bias.data.zero_()
if FLAGS.pre_checkpoint is True:
ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.pre_checkpoint_path, max_to_keep=5)
# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
print(ckpt_manager.latest_checkpoint)
ckpt.restore(ckpt_manager.latest_checkpoint)
print ('Latest checkpoint restored!!')
if FLAGS.train == True:
data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0, usecols=0)
data_name = [FLAGS.img_path + data_name_ for data_name_ in data_name]
data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0, usecols=1)
imp = task_importance_weights(data_label-16)
imp = imp[0:FLAGS.num_classes-1]
val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100', skiprows=0, usecols=[0, 1, 2, 3])
print(len(val_data_name))
WM_img, WM_age = [], []
WF_img, WF_age = [], []
BM_img, BM_age = [], []
BF_img, BF_age = [], []
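        # Split the validation list into the four subgroups used for per-group MAE
        # reporting (WM, WF, BM, BF), based on the gender and group codes in the text file.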
for i in range(len(val_data_name)):
if val_data_name[i][2] == "M" and val_data_name[i][3] == "W":
WM_img.append(FLAGS.val_img_path + val_data_name[i][0])
WM_age.append(val_data_name[i][1])
if val_data_name[i][2] == "F" and val_data_name[i][3] == "W":
WF_img.append(FLAGS.val_img_path + val_data_name[i][0])
WF_age.append(val_data_name[i][1])
if val_data_name[i][2] == "M" and val_data_name[i][3] == "B":
BM_img.append(FLAGS.val_img_path + val_data_name[i][0])
BM_age.append(val_data_name[i][1])
if val_data_name[i][2] == "F" and val_data_name[i][3] == "B":
BF_img.append(FLAGS.val_img_path + val_data_name[i][0])
BF_age.append(val_data_name[i][1])
print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))
WM_img, WM_age = np.array(WM_img), np.array(WM_age)
WF_img, WF_age = np.array(WF_img), np.array(WF_age)
BM_img, BM_age = np.array(BM_img), np.array(BM_age)
BF_img, BF_age = np.array(BF_img), np.array(BF_age)
all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age], [BF_img, BF_age]]
batch_idx = len(data_label) // FLAGS.batch_size
#current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
#train_log_dir = FLAGS.graphs + current_time + '/train'
#val_log_dir = FLAGS.graphs + current_time + '/val'
#train_summary_writer = tf.summary.create_file_writer(train_log_dir)
#val_summary_writer = tf.summary.create_file_writer(val_log_dir)
loss_f = open(FLAGS.output_loss_txt, "w")
count = 0
for epoch in range(FLAGS.epochs):
A = list(zip(data_name, data_label))
shuffle(A)
data_name, data_label = zip(*A)
data_name = np.array(data_name)
data_label = np.array(data_label)
data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(FLAGS.batch_size)
data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)
it = iter(data_generator)
#imp = task_importance_weights(data_label)
#imp = imp[0:FLAGS.num_classes-1]
for step in range(batch_idx):
batch_images, batch_labels, age = next(it)
levels = make_levels(age)
total_loss = train_step(train_model, batch_images, levels, imp)
#with val_summary_writer.as_default():
# tf.summary.scalar(u'total loss', loss, step=count)
if count % 10 == 0:
#MAE = test_MAE(train_model, batch_images, batch_labels, levels)
print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, step + 1, batch_idx, total_loss))
if count % 100 == 0:
test_list = ["WM", "WF", "BM", "BF"]
for j in range(len(all_val_list)):
val_img, val_lab = all_val_list[j]
val_data_generator = tf.data.Dataset.from_tensor_slices((val_img, val_lab))
val_data_generator = val_data_generator.map(val_func)
val_data_generator = val_data_generator.batch(1)
val_data_generator = val_data_generator.prefetch(tf.data.experimental.AUTOTUNE)
val_idx = len(val_img) // 1
val_it = iter(val_data_generator)
AE = 0
for i in range(val_idx):
img, lab = next(val_it)
pre_age = test_MAE(train_model, img, lab)
AE += pre_age
print("MAE = {} ({})".format(AE / len(val_img), test_list[j]))
loss_f.write("Epochs: {}, step = {}".format(epoch, count))
loss_f.write(" --> ")
loss_f.write(test_list[j])
loss_f.write(": ")
loss_f.write(str(AE / len(val_img)))
loss_f.write(", ")
loss_f.write("\n")
loss_f.flush()
# print("==========")
# print("[2]MAE = {}".format(MAE))
# print("==========")
# model_dir = FLAGS.save_checkpoint
# folder_name = int((count + 1)/val_idx)
# folder_name_str = "%s/%s" % (model_dir, folder_name)
# if not os.path.isdir(folder_name_str):
# print("Make {} folder to save checkpoint".format(folder_name))
# os.makedirs(folder_name_str)
# ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)
# checkpoint_dir = folder_name_str + "/" + "CORAL_{}_steps.ckpt".format(count)
# ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_dir, 5)
# ckpt_manager.save()
# with val_summary_writer.as_default():
# tf.summary.scalar(u'[2]MAE', MAE, step=count)
count += 1
else:
data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0, usecols=0)
data_name = [FLAGS.test_img + data_name_ for data_name_ in data_name]
        data_label = np.loadtxt(FLAGS.test_txt, dtype=np.int32, skiprows=0, usecols=1)
data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))
data_generator = data_generator.shuffle(len(data_name))
data_generator = data_generator.map(_func)
data_generator = data_generator.batch(1)
data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)
MAE = 0
it = iter(data_generator)
for i in range(FLAGS.n_test):
image, labels, opp_labels = next(it)
_, probs = train_model(image, training=False)
predict = probs > 0.5
predict = tf.cast(predict, tf.float32)
pre_age = tf.reduce_sum(predict)
            grd_age = tf.cast(opp_labels, tf.float32)

            MAE += tf.reduce_sum(tf.math.abs(grd_age - pre_age))
if i % 1000 == 0:
print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))
print('Total MAE = {}'.format(MAE / FLAGS.n_test))
if __name__ == '__main__':
app.run(main)
|
normal
|
{
"blob_id": "9ffe350ff9a568111620ef7dafef83d341f6f01e",
"index": 9409,
"step-1": "<mask token>\n\n\ndef _func(filename, label):\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot, label\n\n\ndef val_func(name, label):\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot\n\n\ndef run_model(model, images):\n logits, probs = model(images, training=True)\n return logits, probs\n\n\[email protected]\ndef train_step(model, images, levels, imp):\n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +\n (tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)\n total_loss = tf.reduce_mean(total_loss)\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\n\n<mask token>\n\n\[email protected]\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels\n [i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n return tf.convert_to_tensor(levels, tf.float32)\n\n\ndef main(argv=None):\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -\n 8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,\n weight_path=FLAGS.weights, weights='imagenet')\n regularizer = tf.keras.regularizers.l2(5e-06)\n initializer = tf.keras.initializers.glorot_normal()\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(\n avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,\n probs])\n train_model.summary()\n if FLAGS.pre_checkpoint is True:\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer\n )\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.\n pre_checkpoint_path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n if FLAGS.train == True:\n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,\n usecols=1)\n imp = task_importance_weights(data_label - 16)\n imp = imp[0:FLAGS.num_classes - 1]\n val_data_name 
= np.loadtxt(FLAGS.val_txt_path, dtype='<U100',\n skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age\n ], [BF_img, BF_age]]\n batch_idx = len(data_label) // FLAGS.batch_size\n loss_f = open(FLAGS.output_loss_txt, 'w')\n count = 0\n for epoch in range(FLAGS.epochs):\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.\n AUTOTUNE)\n it = iter(data_generator)\n for step in range(batch_idx):\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n if count % 10 == 0:\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, \n step + 1, batch_idx, total_loss))\n if count % 100 == 0:\n test_list = ['WM', 'WF', 'BM', 'BF']\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n val_data_generator = (tf.data.Dataset.\n from_tensor_slices((val_img, val_lab)))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf\n .data.experimental.AUTOTUNE)\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n print('MAE = {} ({})'.format(AE / len(val_img),\n test_list[j]))\n loss_f.write('Epochs: {}, step = {}'.format(epoch,\n count))\n loss_f.write(' --> ')\n loss_f.write(test_list[j])\n loss_f.write(': ')\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(', ')\n loss_f.write('\\n')\n loss_f.flush()\n count += 1\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=\n 0, usecols=1)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator 
= data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n image, labels, opp_labels = next(it)\n _, probs = train_model(image, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _func(filename, label):\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot, label\n\n\ndef val_func(name, label):\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot\n\n\ndef run_model(model, images):\n logits, probs = model(images, training=True)\n return logits, probs\n\n\[email protected]\ndef train_step(model, images, levels, imp):\n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +\n (tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)\n total_loss = tf.reduce_mean(total_loss)\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\n\ndef task_importance_weights(data):\n label = np.array(data).astype(np.float32)\n num_examples = label.size\n y = np.unique(label)\n m = np.zeros(label.shape)\n for i, t in enumerate(np.arange(np.min(y), np.max(y))):\n m_k = np.max([label[label > t].size, num_examples - label[label > t\n ].size])\n m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)\n m[i] = tf.sqrt(m_k)\n max_ = np.max(m)\n imp = tf.cast(m / max_, tf.float32)\n return imp\n\n\[email protected]\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels\n [i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n return tf.convert_to_tensor(levels, tf.float32)\n\n\ndef main(argv=None):\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -\n 8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,\n weight_path=FLAGS.weights, weights='imagenet')\n regularizer = tf.keras.regularizers.l2(5e-06)\n initializer = tf.keras.initializers.glorot_normal()\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(\n avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,\n probs])\n train_model.summary()\n if FLAGS.pre_checkpoint is True:\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer\n )\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.\n pre_checkpoint_path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n 
ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n if FLAGS.train == True:\n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,\n usecols=1)\n imp = task_importance_weights(data_label - 16)\n imp = imp[0:FLAGS.num_classes - 1]\n val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',\n skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age\n ], [BF_img, BF_age]]\n batch_idx = len(data_label) // FLAGS.batch_size\n loss_f = open(FLAGS.output_loss_txt, 'w')\n count = 0\n for epoch in range(FLAGS.epochs):\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.\n AUTOTUNE)\n it = iter(data_generator)\n for step in range(batch_idx):\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n if count % 10 == 0:\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, \n step + 1, batch_idx, total_loss))\n if count % 100 == 0:\n test_list = ['WM', 'WF', 'BM', 'BF']\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n val_data_generator = (tf.data.Dataset.\n from_tensor_slices((val_img, val_lab)))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf\n .data.experimental.AUTOTUNE)\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n print('MAE = {} ({})'.format(AE / len(val_img),\n test_list[j]))\n loss_f.write('Epochs: {}, step = {}'.format(epoch,\n count))\n loss_f.write(' --> ')\n loss_f.write(test_list[j])\n loss_f.write(': ')\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(', ')\n loss_f.write('\\n')\n loss_f.flush()\n count += 
1\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=\n 0, usecols=1)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n image, labels, opp_labels = next(it)\n _, probs = train_model(image, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\n\n<mask token>\n",
"step-3": "<mask token>\nflags.DEFINE_string('img_path',\n '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')\nflags.DEFINE_string('txt_path',\n '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt'\n , 'Text (with label information) directory')\nflags.DEFINE_string('val_img_path',\n '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/',\n 'Validate image path')\nflags.DEFINE_string('val_txt_path',\n '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt'\n , 'Validate text path')\nflags.DEFINE_string('val_txt_path_2',\n 'D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt'\n , 'Validataion text path')\nflags.DEFINE_integer('img_size', 128, 'Image size')\nflags.DEFINE_integer('ch', 3, 'Image channels')\nflags.DEFINE_integer('batch_size', 256, 'Train Batch size')\nflags.DEFINE_integer('val_batch_size', 128, 'Validation Batch size')\nflags.DEFINE_integer('val_batch_size_2', 128, 'Validation2 batch size')\nflags.DEFINE_integer('num_classes', 48, 'Number of classes')\nflags.DEFINE_integer('epochs', 5000, 'Total epochs of training')\nflags.DEFINE_float('lr', 5e-05, 'Learning rate')\nflags.DEFINE_string('weights',\n '/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5'\n , '')\nflags.DEFINE_bool('train', True, 'True or False')\nflags.DEFINE_bool('pre_checkpoint', False, 'True or False')\nflags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')\nflags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')\nflags.DEFINE_string('graphs', '', '')\nflags.DEFINE_integer('n_test', 10000, 'Number of test images')\nflags.DEFINE_string('test_txt', '', 'Test text(label) path')\nflags.DEFINE_string('test_img', '', 'Test images path')\nflags.DEFINE_string('output_loss_txt',\n '/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt', ''\n )\n<mask token>\nFLAGS(sys.argv)\n<mask token>\n\n\ndef _func(filename, label):\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot, label\n\n\ndef val_func(name, label):\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot\n\n\ndef run_model(model, images):\n logits, probs = model(images, training=True)\n return logits, probs\n\n\[email protected]\ndef train_step(model, images, levels, imp):\n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +\n (tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)\n total_loss = tf.reduce_mean(total_loss)\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\n\ndef task_importance_weights(data):\n label = np.array(data).astype(np.float32)\n num_examples = label.size\n y = np.unique(label)\n m = np.zeros(label.shape)\n 
for i, t in enumerate(np.arange(np.min(y), np.max(y))):\n m_k = np.max([label[label > t].size, num_examples - label[label > t\n ].size])\n m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)\n m[i] = tf.sqrt(m_k)\n max_ = np.max(m)\n imp = tf.cast(m / max_, tf.float32)\n return imp\n\n\[email protected]\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels\n [i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n return tf.convert_to_tensor(levels, tf.float32)\n\n\ndef main(argv=None):\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -\n 8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,\n weight_path=FLAGS.weights, weights='imagenet')\n regularizer = tf.keras.regularizers.l2(5e-06)\n initializer = tf.keras.initializers.glorot_normal()\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(\n avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,\n probs])\n train_model.summary()\n if FLAGS.pre_checkpoint is True:\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer\n )\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.\n pre_checkpoint_path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n if FLAGS.train == True:\n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,\n usecols=1)\n imp = task_importance_weights(data_label - 16)\n imp = imp[0:FLAGS.num_classes - 1]\n val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',\n skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), 
np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age\n ], [BF_img, BF_age]]\n batch_idx = len(data_label) // FLAGS.batch_size\n loss_f = open(FLAGS.output_loss_txt, 'w')\n count = 0\n for epoch in range(FLAGS.epochs):\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.\n AUTOTUNE)\n it = iter(data_generator)\n for step in range(batch_idx):\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n if count % 10 == 0:\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, \n step + 1, batch_idx, total_loss))\n if count % 100 == 0:\n test_list = ['WM', 'WF', 'BM', 'BF']\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n val_data_generator = (tf.data.Dataset.\n from_tensor_slices((val_img, val_lab)))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf\n .data.experimental.AUTOTUNE)\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n print('MAE = {} ({})'.format(AE / len(val_img),\n test_list[j]))\n loss_f.write('Epochs: {}, step = {}'.format(epoch,\n count))\n loss_f.write(' --> ')\n loss_f.write(test_list[j])\n loss_f.write(': ')\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(', ')\n loss_f.write('\\n')\n loss_f.flush()\n count += 1\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=\n 0, usecols=1)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n image, labels, opp_labels = next(it)\n _, probs = train_model(image, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"step-4": "<mask token>\nflags.DEFINE_string('img_path',\n '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')\nflags.DEFINE_string('txt_path',\n '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt'\n , 'Text (with label information) directory')\nflags.DEFINE_string('val_img_path',\n '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/',\n 'Validate image path')\nflags.DEFINE_string('val_txt_path',\n '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt'\n , 'Validate text path')\nflags.DEFINE_string('val_txt_path_2',\n 'D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt'\n , 'Validataion text path')\nflags.DEFINE_integer('img_size', 128, 'Image size')\nflags.DEFINE_integer('ch', 3, 'Image channels')\nflags.DEFINE_integer('batch_size', 256, 'Train Batch size')\nflags.DEFINE_integer('val_batch_size', 128, 'Validation Batch size')\nflags.DEFINE_integer('val_batch_size_2', 128, 'Validation2 batch size')\nflags.DEFINE_integer('num_classes', 48, 'Number of classes')\nflags.DEFINE_integer('epochs', 5000, 'Total epochs of training')\nflags.DEFINE_float('lr', 5e-05, 'Learning rate')\nflags.DEFINE_string('weights',\n '/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5'\n , '')\nflags.DEFINE_bool('train', True, 'True or False')\nflags.DEFINE_bool('pre_checkpoint', False, 'True or False')\nflags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')\nflags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')\nflags.DEFINE_string('graphs', '', '')\nflags.DEFINE_integer('n_test', 10000, 'Number of test images')\nflags.DEFINE_string('test_txt', '', 'Test text(label) path')\nflags.DEFINE_string('test_img', '', 'Test images path')\nflags.DEFINE_string('output_loss_txt',\n '/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt', ''\n )\nFLAGS = flags.FLAGS\nFLAGS(sys.argv)\noptimizer = tf.keras.optimizers.Adam(FLAGS.lr, beta_1=0.9, beta_2=0.99)\n\n\ndef _func(filename, label):\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot, label\n\n\ndef val_func(name, label):\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS\n .img_size - 8]) / 255.0\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n return decode_image, one_hot\n\n\ndef run_model(model, images):\n logits, probs = model(images, training=True)\n return logits, probs\n\n\[email protected]\ndef train_step(model, images, levels, imp):\n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n total_loss = -tf.reduce_sum((tf.math.log_sigmoid(logits) * levels +\n (tf.math.log_sigmoid(logits) - logits) * (1 - levels)) * imp, 1)\n total_loss = tf.reduce_mean(total_loss)\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\n\ndef task_importance_weights(data):\n label = np.array(data).astype(np.float32)\n num_examples = 
label.size\n y = np.unique(label)\n m = np.zeros(label.shape)\n for i, t in enumerate(np.arange(np.min(y), np.max(y))):\n m_k = np.max([label[label > t].size, num_examples - label[label > t\n ].size])\n m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)\n m[i] = tf.sqrt(m_k)\n max_ = np.max(m)\n imp = tf.cast(m / max_, tf.float32)\n return imp\n\n\[email protected]\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * labels[i].numpy() + [0] * (FLAGS.num_classes - 1 - labels\n [i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n return tf.convert_to_tensor(levels, tf.float32)\n\n\ndef main(argv=None):\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size -\n 8, FLAGS.ch), include_top=False, batch_size=FLAGS.batch_size,\n weight_path=FLAGS.weights, weights='imagenet')\n regularizer = tf.keras.regularizers.l2(5e-06)\n initializer = tf.keras.initializers.glorot_normal()\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n logits = tf.keras.layers.Dense(FLAGS.num_classes - 1, use_bias=False)(\n avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits,\n probs])\n train_model.summary()\n if FLAGS.pre_checkpoint is True:\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer\n )\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.\n pre_checkpoint_path, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print('Latest checkpoint restored!!')\n if FLAGS.train == True:\n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.img_path + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0,\n usecols=1)\n imp = task_importance_weights(data_label - 16)\n imp = imp[0:FLAGS.num_classes - 1]\n val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100',\n skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'W':\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'W':\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'M' and val_data_name[i][3] == 'B':\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n if val_data_name[i][2] == 'F' and val_data_name[i][3] == 'B':\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), 
np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age\n ], [BF_img, BF_age]]\n batch_idx = len(data_label) // FLAGS.batch_size\n loss_f = open(FLAGS.output_loss_txt, 'w')\n count = 0\n for epoch in range(FLAGS.epochs):\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.\n AUTOTUNE)\n it = iter(data_generator)\n for step in range(batch_idx):\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n if count % 10 == 0:\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, \n step + 1, batch_idx, total_loss))\n if count % 100 == 0:\n test_list = ['WM', 'WF', 'BM', 'BF']\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n val_data_generator = (tf.data.Dataset.\n from_tensor_slices((val_img, val_lab)))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf\n .data.experimental.AUTOTUNE)\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n print('MAE = {} ({})'.format(AE / len(val_img),\n test_list[j]))\n loss_f.write('Epochs: {}, step = {}'.format(epoch,\n count))\n loss_f.write(' --> ')\n loss_f.write(test_list[j])\n loss_f.write(': ')\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(', ')\n loss_f.write('\\n')\n loss_f.flush()\n count += 1\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0,\n usecols=0)\n data_name = [(FLAGS.test_img + data_name_) for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=\n 0, usecols=1)\n data_generator = tf.data.Dataset.from_tensor_slices((data_name,\n data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n image, labels, opp_labels = next(it)\n _, probs = train_model(image, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"step-5": "# -*- coding: utf-8 -*-\n# https://github.com/Raschka-research-group/coral-cnn/tree/master/model-code/resnet34\nfrom absl import flags, app\nfrom Rank_consistent_model_fix import *\nfrom Rank_consistent_model import *\nfrom random import shuffle, random\n\nimport tensorflow as tf\nimport numpy as np\n# import cv2\nimport os\nimport sys\nimport datetime\n\nflags.DEFINE_string('img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Image directory')\n\nflags.DEFINE_string('txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/train.txt', 'Text (with label information) directory')\n\nflags.DEFINE_string('val_img_path', '/yuwhan/yuwhan/Dataset/[1]Third_dataset/UTK/UTKFace/', 'Validate image path')\n\nflags.DEFINE_string('val_txt_path', '/yuwhan/yuwhan/Dataset/[2]Fourth_dataset/age_banchmark/train_data/UTK/test.txt', 'Validate text path')\n\nflags.DEFINE_string(\"val_txt_path_2\", \"D:/[1]DB/[1]second_paper_DB/[1]First_fold/_MORPH_MegaAge_16_69_fullDB/[1]Full_DB/testB.txt\", \"Validataion text path\")\n\nflags.DEFINE_integer('img_size', 128, 'Image size')\n\nflags.DEFINE_integer('ch', 3, 'Image channels')\n\nflags.DEFINE_integer('batch_size', 256, 'Train Batch size')\n\nflags.DEFINE_integer(\"val_batch_size\", 128, \"Validation Batch size\")\n\nflags.DEFINE_integer(\"val_batch_size_2\", 128, \"Validation2 batch size\")\n\nflags.DEFINE_integer('num_classes', 48, 'Number of classes')\n\nflags.DEFINE_integer('epochs', 5000, 'Total epochs of training')\n\nflags.DEFINE_float(\"lr\", 5e-5, \"Learning rate\")\n\nflags.DEFINE_string('weights', \"/yuwhan/yuwhan/Projects/[1]Age_related_work_2.x_My_version/Rank-consistent Ordinal Regression for Neural/resnet34_imagenet_1000_no_top.h5\", '')\n\nflags.DEFINE_bool('train', True, 'True or False')\n\nflags.DEFINE_bool('pre_checkpoint', False, 'True or False')\n\nflags.DEFINE_string('pre_checkpoint_path', '', 'Saved checkpoint path')\n\nflags.DEFINE_string('save_checkpoint', '', 'Save checkpoint path')\n\nflags.DEFINE_string(\"graphs\", \"\", \"\")\n\nflags.DEFINE_integer('n_test', 10000, 'Number of test images')\n\nflags.DEFINE_string('test_txt', '', 'Test text(label) path')\n\nflags.DEFINE_string('test_img', '', 'Test images path')\n\nflags.DEFINE_string(\"output_loss_txt\", \"/yuwhan/Edisk/yuwhan/Edisk/4th_paper/age_banchmark/UTK/loss_CORAL.txt\", \"\")\n\nFLAGS = flags.FLAGS\nFLAGS(sys.argv)\n\noptimizer = tf.keras.optimizers.Adam(FLAGS.lr,beta_1=0.9, beta_2=0.99)\n\n\ndef _func(filename, label):\n\n image_string = tf.io.read_file(filename)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.\n #decode_image = tf.image.random_crop(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8, 3])\n\n if random() > 0.5:\n decode_image = tf.image.flip_left_right(decode_image)\n\n #decode_image = tf.image.per_image_standardization(decode_image)\n\n label = label - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes)\n \n return decode_image, one_hot, label\n\ndef val_func(name, label):\n\n image_string = tf.io.read_file(name)\n decode_image = tf.image.decode_jpeg(image_string, channels=3)\n decode_image = tf.image.resize(decode_image, [FLAGS.img_size - 8, FLAGS.img_size - 8]) / 255.\n #decode_image = tf.image.per_image_standardization(decode_image)\n\n label = int(label) - 16\n one_hot = tf.one_hot(label, FLAGS.num_classes) \n \n return decode_image, one_hot\n\n#@tf.function\ndef run_model(model, images):\n 
logits, probs = model(images, training=True)\n return logits, probs\n\[email protected]\ndef train_step(model, images, levels, imp):\n \n with tf.GradientTape() as tape:\n logits, probs = run_model(model, images)\n\n #total_loss = (-tf.reduce_sum((tf.nn.log_softmax(logits, axis=2)[:,:,1]*levels + tf.nn.log_softmax(logits, axis=2)[:,:,0]*(1-levels))*imp, 1))\n \n # total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + tf.math.log(1. - tf.nn.sigmoid(logits))*(1-levels))*imp, 1))\n total_loss = (-tf.reduce_sum( (tf.math.log_sigmoid(logits)*levels + (tf.math.log_sigmoid(logits) - logits)*(1-levels))*imp, 1))\n #total_loss = -tf.reduce_sum((tf.math.log(tf.nn.softmax(logits, 2)[:, :, 1] + 1e-7) * levels \\\n # + tf.math.log(tf.nn.softmax(logits, 2)[:, :, 0] + 1e-7) * (1 - levels)) * imp, 1)\n\n total_loss = tf.reduce_mean(total_loss)\n\n gradients = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return total_loss\n\ndef task_importance_weights(data):\n label = np.array(data).astype(np.float32)\n num_examples = label.size\n\n y = np.unique(label)\n \n m = np.zeros(label.shape)\n\n for i, t in enumerate(np.arange(np.min(y), np.max(y))):\n m_k = np.max([label[label > t].size, \n num_examples - label[label > t].size])\n #print(m_k)\n m_k = tf.cast(tf.convert_to_tensor(m_k), tf.float32)\n m[i] = tf.sqrt(m_k)\n # m[i] = float(m_k)**(0.5)\n\n max_ = np.max(m)\n imp = tf.cast(m / max_, tf.float32)\n #print(imp)\n return imp\n \[email protected]\ndef test_MAE(model, images, labels):\n logits, probs = model(images, training=False)\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict, 1)\n grd_age = tf.argmax(labels, 1) + 1\n grd_age = tf.cast(grd_age, tf.float32)\n AE = tf.reduce_sum(tf.math.abs(grd_age - pre_age))\n return AE\n\ndef make_levels(labels):\n levels = []\n for i in range(FLAGS.batch_size):\n l = [1] * (labels[i].numpy()) + [0]*(FLAGS.num_classes - 1 - labels[i].numpy())\n l = tf.cast(l, tf.float32)\n levels.append(l)\n\n return tf.convert_to_tensor(levels, tf.float32)\n\ndef main(argv=None):\n\n # train_model = resnet_type1(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, 3), NUM_CLASSES=FLAGS.num_classes)\n train_model = ResNet34(input_shape=(FLAGS.img_size - 8, FLAGS.img_size - 8, FLAGS.ch), include_top=False,\n batch_size=FLAGS.batch_size, weight_path=FLAGS.weights, weights='imagenet')\n\n regularizer = tf.keras.regularizers.l2(0.000005)\n initializer = tf.keras.initializers.glorot_normal()\n\n for layer in train_model.layers:\n for attr in ['kernel_regularizer']:\n if hasattr(layer, attr):\n setattr(layer, attr, regularizer)\n # for attr_ in [\"kernel_initializer\"]:\n # if hasattr(layer, attr_):\n # setattr(layer, attr_, initializer)\n\n x = train_model.output\n avgpool = tf.keras.layers.GlobalAveragePooling2D()(x)\n # avgpool = tf.reshape(avgpool, [avgpool.shape[0], -1])\n # fc = tf.keras.layers.Dense(1, use_bias=False)(avgpool)\n\n # logits = Linear(NUM_CLASSES - 1)(fc)\n logits = tf.keras.layers.Dense(FLAGS.num_classes-1, use_bias=False)(avgpool)\n logits = Linear(FLAGS.num_classes - 1)(logits)\n probs = tf.nn.sigmoid(logits)\n\n train_model = tf.keras.Model(inputs=train_model.input, outputs=[logits, probs])\n train_model.summary()\n\n #for m in train_model.layers:\n # if isinstance(m, tf.keras.layers.Conv2D):\n # a = m.output_mask\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, (2. 
/ n)**.5)\n # elif isinstance(m, tf.keras.layers.BatchNormalization):\n # m.get_weights\n # m.weight.data.fill_(1)\n # m.bias.data.zero_()\n\n if FLAGS.pre_checkpoint is True:\n\n ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)\n\n ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.pre_checkpoint_path, max_to_keep=5)\n\n # if a checkpoint exists, restore the latest checkpoint.\n if ckpt_manager.latest_checkpoint:\n print(ckpt_manager.latest_checkpoint)\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print ('Latest checkpoint restored!!')\n\n if FLAGS.train == True:\n \n data_name = np.loadtxt(FLAGS.txt_path, dtype='<U100', skiprows=0, usecols=0)\n data_name = [FLAGS.img_path + data_name_ for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.int32, skiprows=0, usecols=1)\n\n imp = task_importance_weights(data_label-16)\n imp = imp[0:FLAGS.num_classes-1]\n\n val_data_name = np.loadtxt(FLAGS.val_txt_path, dtype='<U100', skiprows=0, usecols=[0, 1, 2, 3])\n print(len(val_data_name))\n WM_img, WM_age = [], []\n WF_img, WF_age = [], []\n BM_img, BM_age = [], []\n BF_img, BF_age = [], []\n for i in range(len(val_data_name)):\n\n if val_data_name[i][2] == \"M\" and val_data_name[i][3] == \"W\":\n WM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WM_age.append(val_data_name[i][1])\n\n if val_data_name[i][2] == \"F\" and val_data_name[i][3] == \"W\":\n WF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n WF_age.append(val_data_name[i][1])\n\n if val_data_name[i][2] == \"M\" and val_data_name[i][3] == \"B\":\n BM_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BM_age.append(val_data_name[i][1])\n\n if val_data_name[i][2] == \"F\" and val_data_name[i][3] == \"B\":\n BF_img.append(FLAGS.val_img_path + val_data_name[i][0])\n BF_age.append(val_data_name[i][1])\n \n print(len(WM_img), len(WF_img), len(BM_img), len(BF_img))\n WM_img, WM_age = np.array(WM_img), np.array(WM_age)\n WF_img, WF_age = np.array(WF_img), np.array(WF_age)\n BM_img, BM_age = np.array(BM_img), np.array(BM_age)\n BF_img, BF_age = np.array(BF_img), np.array(BF_age)\n all_val_list = [[WM_img, WM_age], [WF_img, WF_age], [BM_img, BM_age], [BF_img, BF_age]]\n\n batch_idx = len(data_label) // FLAGS.batch_size\n\n #current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n #train_log_dir = FLAGS.graphs + current_time + '/train'\n #val_log_dir = FLAGS.graphs + current_time + '/val'\n #train_summary_writer = tf.summary.create_file_writer(train_log_dir)\n #val_summary_writer = tf.summary.create_file_writer(val_log_dir)\n\n loss_f = open(FLAGS.output_loss_txt, \"w\")\n count = 0\n for epoch in range(FLAGS.epochs):\n\n A = list(zip(data_name, data_label))\n shuffle(A)\n data_name, data_label = zip(*A)\n data_name = np.array(data_name)\n data_label = np.array(data_label)\n\n data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(FLAGS.batch_size)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n\n it = iter(data_generator)\n\n #imp = task_importance_weights(data_label)\n #imp = imp[0:FLAGS.num_classes-1]\n for step in range(batch_idx):\n\n batch_images, batch_labels, age = next(it)\n levels = make_levels(age)\n total_loss = train_step(train_model, batch_images, levels, imp)\n\n #with val_summary_writer.as_default():\n # tf.summary.scalar(u'total loss', loss, step=count)\n\n if 
count % 10 == 0:\n #MAE = test_MAE(train_model, batch_images, batch_labels, levels)\n print('Epoch: {} [{}/{}] loss = {}'.format(epoch + 1, step + 1, batch_idx, total_loss))\n\n if count % 100 == 0:\n test_list = [\"WM\", \"WF\", \"BM\", \"BF\"]\n for j in range(len(all_val_list)):\n val_img, val_lab = all_val_list[j]\n\n val_data_generator = tf.data.Dataset.from_tensor_slices((val_img, val_lab))\n val_data_generator = val_data_generator.map(val_func)\n val_data_generator = val_data_generator.batch(1)\n val_data_generator = val_data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n\n val_idx = len(val_img) // 1\n val_it = iter(val_data_generator)\n AE = 0\n\n for i in range(val_idx):\n img, lab = next(val_it)\n pre_age = test_MAE(train_model, img, lab)\n AE += pre_age\n\n print(\"MAE = {} ({})\".format(AE / len(val_img), test_list[j]))\n\n loss_f.write(\"Epochs: {}, step = {}\".format(epoch, count))\n loss_f.write(\" --> \")\n loss_f.write(test_list[j])\n loss_f.write(\": \")\n loss_f.write(str(AE / len(val_img)))\n loss_f.write(\", \")\n\n loss_f.write(\"\\n\")\n loss_f.flush()\n\n\n\n # print(\"==========\")\n # print(\"[2]MAE = {}\".format(MAE))\n # print(\"==========\")\n # model_dir = FLAGS.save_checkpoint\n # folder_name = int((count + 1)/val_idx)\n # folder_name_str = \"%s/%s\" % (model_dir, folder_name)\n # if not os.path.isdir(folder_name_str):\n # print(\"Make {} folder to save checkpoint\".format(folder_name))\n # os.makedirs(folder_name_str)\n # ckpt = tf.train.Checkpoint(train_model=train_model, optimizer=optimizer)\n # checkpoint_dir = folder_name_str + \"/\" + \"CORAL_{}_steps.ckpt\".format(count)\n # ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_dir, 5)\n # ckpt_manager.save()\n\n # with val_summary_writer.as_default():\n # tf.summary.scalar(u'[2]MAE', MAE, step=count)\n\n count += 1\n\n else:\n data_name = np.loadtxt(FLAGS.test_txt, dtype='<U100', skiprows=0, usecols=0)\n data_name = [FLAGS.test_img + data_name_ for data_name_ in data_name]\n data_label = np.loadtxt(FLAGS.txt_path, dtype=np.float32, skiprows=0, usecols=1)\n\n data_generator = tf.data.Dataset.from_tensor_slices((data_name, data_label))\n data_generator = data_generator.shuffle(len(data_name))\n data_generator = data_generator.map(_func)\n data_generator = data_generator.batch(1)\n data_generator = data_generator.prefetch(tf.data.experimental.AUTOTUNE)\n\n MAE = 0\n it = iter(data_generator)\n for i in range(FLAGS.n_test):\n\n image, labels, opp_labels = next(it)\n\n _, probs = train_model(image, training=False)\n\n predict = probs > 0.5\n predict = tf.cast(predict, tf.float32)\n pre_age = tf.reduce_sum(predict)\n age = tf.cast(age, tf.float32)\n MAE += tf.reduce_sum(tf.math.abs(grd_age - age))\n\n if i % 1000 == 0:\n print('{} image(s) for MAE = {}'.format(i + 1, MAE / (i + 1)))\n\n print('Total MAE = {}'.format(MAE / FLAGS.n_test))\n\nif __name__ == '__main__':\n app.run(main)\n",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
# -*- coding: utf-8 -*-
'''
Created on Dec 22, 2014
@author: Alan Tai
'''
from handlers.handler_webapp2_extra_auth import BaseHandler
from models.models_porn_info import WebLinkRoot, WebLinkPornTemp, WebLinkPorn,\
Tag
from dictionaries.dict_key_value_pairs import KeyValuePairsGeneral
from bs4 import BeautifulSoup
import webapp2, logging, re, urllib2, urlparse
from datetime import datetime
#
dict_general = KeyValuePairsGeneral()
class TaskCrawlRootLinksDispatcher(BaseHandler):
def get(self):
self._read_feed()
def _read_feed(self):
""" crawling task """
# temp root links
root_list_temp = dict_general.default_urls
# construct search list
search_list = []
query_root_entities = WebLinkRoot.query()
if query_root_entities.count() > 0:
for entity in query_root_entities:
search_list.append({"title" : entity.title , "link" : entity.link})
else:
search_list = root_list_temp
# start to crawl
list_found_link = []
while len(search_list) > 0:
link = search_list.pop(0)["link"]
parsed_str = urlparse.urlsplit(link)
link_base = "{url_scheme}://{url_netloc}".format(url_scheme = parsed_str.scheme, url_netloc = parsed_str.netloc)
try:
req = urllib2.Request(link)
response = urllib2.urlopen(req) # need to add new mechanism to prevent fetch javascript
searched_page = response.read()
soup = BeautifulSoup(searched_page)
for found_link in soup.find_all('a'):
if found_link.get('href'):
match_group = re.match("http", found_link.get('href'), re.I)
full_href = ""
title = "NA"
if not match_group:
full_href = "{href_link_base}{sub_href}".format(href_link_base = link_base, sub_href = found_link.get('href'))
else:
full_href = found_link.get('href')
if found_link.contents and len(found_link.contents) > 0 and found_link.contents[0].string:
title = found_link.contents[0].string
list_found_link.append({'title' : title, 'link' : full_href})
except urllib2.HTTPError, err:
pass
# store result into db
while len(list_found_link) > 0:
new_link = list_found_link.pop(0)
query = WebLinkPornTemp.query(WebLinkPornTemp.link == new_link['link'])
if query.count() == 0:
new_info = WebLinkPornTemp()
new_info.link = new_link['link']
new_info.title = new_link['title']
new_info.put()
# crawl temp links
class TaskCrawlTempLinksDispatcher(BaseHandler):
def get(self):
# fetch entities from db
entities = WebLinkPornTemp.query().fetch(15)
search_list = []
if entities:
for entity in entities:
search_list.append({'title' : entity.title, 'link' : entity.link})
entity.key.delete()
else:
search_list = dict_general.default_urls
# crawl website
list_found_link = []
while len(search_list) > 0:
link = search_list.pop(0)['link']
parsed_str = urlparse.urlsplit(link)
link_base = "{url_scheme}://{url_netloc}".format(url_scheme = parsed_str.scheme, url_netloc = parsed_str.netloc)
try:
req = urllib2.Request(link)
response = urllib2.urlopen(req) # need to add new mechanism to prevent fetch javascript
searched_page = response.read()
soup = BeautifulSoup(searched_page)
for found_link in soup.find_all('a'):
if found_link.get('href'):
match_group = re.match("http", found_link.get('href'), re.I)
full_href = ""
title = "NA"
if not match_group:
full_href = "{href_link_base}{sub_href}".format(href_link_base = link_base, sub_href = found_link.get('href'))
else:
full_href = found_link.get('href')
if found_link.contents and len(found_link.contents) > 0 and found_link.contents[0].string:
title = found_link.contents[0].string
list_found_link.append({'title' : title, 'link' : full_href})
except urllib2.HTTPError, err:
pass
# store result into db
while len(list_found_link) > 0:
new_link = list_found_link.pop(0)
query = WebLinkPornTemp.query(WebLinkPornTemp.link == new_link['link'])
if query.count() == 0:
new_info = WebLinkPornTemp()
new_info.link = new_link['link']
new_info.title = new_link['title']
new_info.put()
# categorize porn link info
class TaskCategorizePornInfoDispatcher(BaseHandler):
def get(self):
""" cron task """
self._categorize()
def _categorize(self):
""" categorize wine info """
entities = WebLinkPornTemp.query().fetch(50) # to avoid running datastore free quota limit
for entity in entities:
            result = re.findall(r"video\d+|redtube\.com\d+|videos\d+|watch\d+|viewkey=\d+", entity.link, re.I) # video-id patterns in the link URL
query = WebLinkPorn.query(WebLinkPorn.link == entity.link)
if result and query.count() == 0:
new_wine_info = WebLinkPorn()
new_wine_info.link = entity.link
new_wine_info.title = entity.title
new_wine_info.put()
class TaskCrawlTagInfo(BaseHandler):
def get(self):
base_url = 'http://www.xvideos.com/tags/'
req = urllib2.Request(base_url)
response = urllib2.urlopen(req) # need to add new mechanism to prevent fetch javascript
searched_page = response.read()
soup = BeautifulSoup(searched_page)
for found_link in soup.find_all('a'):
try:
if found_link.get('href'):
match_group = re.match("/tags/.*", found_link.get('href'), re.I)
if match_group:
tag_name = found_link.get('href')[found_link.get('href').rfind('/') + 1:]
tag_number = str(found_link.nextSibling).strip()
tag_info = Tag( site = 'Xvideos',
name = tag_name,
number = tag_number,
created_datetime = datetime.now())
tag_info.put()
except:
pass
# configuration
config = dict_general.config_setting
# app
app = webapp2.WSGIApplication([
webapp2.Route(r'/cron_tasks/crawl_root_links', TaskCrawlRootLinksDispatcher, name = 'crawl_root_links'),
webapp2.Route(r'/cron_tasks/crawl_temp_links', TaskCrawlTempLinksDispatcher, name = 'crawl_temp_links'),
webapp2.Route(r'/cron_tasks/categorize_porn_info', TaskCategorizePornInfoDispatcher, name = "categorize_wine_info"),
webapp2.Route(r'/cron_tasks/crawl_tag_info', TaskCrawlTagInfo, name = 'crawl_tag_info')
], debug=True, config=config)
# log
logging.getLogger().setLevel(logging.DEBUG)
|
normal
|
{
"blob_id": "f6cebf6ec848a06f81c4e1f584ebb83f4d9ff47c",
"index": 3549,
"step-1": "# -*- coding: utf-8 -*-\n'''\nCreated on Dec 22, 2014\n\n@author: Alan Tai\n'''\nfrom handlers.handler_webapp2_extra_auth import BaseHandler\nfrom models.models_porn_info import WebLinkRoot, WebLinkPornTemp, WebLinkPorn,\\\n Tag\nfrom dictionaries.dict_key_value_pairs import KeyValuePairsGeneral\nfrom bs4 import BeautifulSoup\nimport webapp2, logging, re, urllib2, urlparse\nfrom datetime import datetime\n\n\n#\ndict_general = KeyValuePairsGeneral()\n\nclass TaskCrawlRootLinksDispatcher(BaseHandler):\n def get(self):\n self._read_feed()\n \n def _read_feed(self):\n \"\"\" crawling task \"\"\"\n # temp root links\n root_list_temp = dict_general.default_urls\n \n # construct search list\n search_list = []\n query_root_entities = WebLinkRoot.query()\n if query_root_entities.count() > 0:\n for entity in query_root_entities:\n search_list.append({\"title\" : entity.title , \"link\" : entity.link})\n else:\n search_list = root_list_temp\n \n # start to crawl\n list_found_link = []\n while len(search_list) > 0:\n link = search_list.pop(0)[\"link\"]\n parsed_str = urlparse.urlsplit(link)\n link_base = \"{url_scheme}://{url_netloc}\".format(url_scheme = parsed_str.scheme, url_netloc = parsed_str.netloc)\n \n \n try:\n req = urllib2.Request(link)\n response = urllib2.urlopen(req) # need to add new mechanism to prevent fetch javascript\n searched_page = response.read()\n soup = BeautifulSoup(searched_page)\n \n for found_link in soup.find_all('a'):\n if found_link.get('href'):\n match_group = re.match(\"http\", found_link.get('href'), re.I)\n full_href = \"\"\n title = \"NA\"\n \n if not match_group:\n full_href = \"{href_link_base}{sub_href}\".format(href_link_base = link_base, sub_href = found_link.get('href'))\n else:\n full_href = found_link.get('href')\n \n if found_link.contents and len(found_link.contents) > 0 and found_link.contents[0].string:\n title = found_link.contents[0].string\n \n list_found_link.append({'title' : title, 'link' : full_href})\n \n except urllib2.HTTPError, err:\n pass\n \n \n # store result into db\n while len(list_found_link) > 0:\n new_link = list_found_link.pop(0)\n query = WebLinkPornTemp.query(WebLinkPornTemp.link == new_link['link'])\n if query.count() == 0:\n new_info = WebLinkPornTemp()\n new_info.link = new_link['link']\n new_info.title = new_link['title']\n new_info.put()\n \n\n# crawl temp links\nclass TaskCrawlTempLinksDispatcher(BaseHandler):\n def get(self):\n # fetch entities from db\n entities = WebLinkPornTemp.query().fetch(15)\n search_list = []\n \n if entities:\n for entity in entities:\n search_list.append({'title' : entity.title, 'link' : entity.link})\n entity.key.delete()\n else:\n search_list = dict_general.default_urls\n \n # crawl website\n list_found_link = []\n while len(search_list) > 0:\n link = search_list.pop(0)['link']\n parsed_str = urlparse.urlsplit(link)\n link_base = \"{url_scheme}://{url_netloc}\".format(url_scheme = parsed_str.scheme, url_netloc = parsed_str.netloc)\n \n try:\n req = urllib2.Request(link)\n response = urllib2.urlopen(req) # need to add new mechanism to prevent fetch javascript\n searched_page = response.read()\n soup = BeautifulSoup(searched_page)\n \n for found_link in soup.find_all('a'):\n if found_link.get('href'):\n match_group = re.match(\"http\", found_link.get('href'), re.I)\n full_href = \"\"\n title = \"NA\"\n \n if not match_group:\n full_href = \"{href_link_base}{sub_href}\".format(href_link_base = link_base, sub_href = found_link.get('href'))\n else:\n full_href = found_link.get('href')\n \n 
if found_link.contents and len(found_link.contents) > 0 and found_link.contents[0].string:\n title = found_link.contents[0].string\n \n list_found_link.append({'title' : title, 'link' : full_href})\n except urllib2.HTTPError, err:\n pass\n \n # store result into db\n while len(list_found_link) > 0:\n new_link = list_found_link.pop(0)\n query = WebLinkPornTemp.query(WebLinkPornTemp.link == new_link['link'])\n if query.count() == 0:\n new_info = WebLinkPornTemp()\n new_info.link = new_link['link']\n new_info.title = new_link['title']\n new_info.put()\n \n \n\n# categorize wine info\nclass TaskCategorizePornInfoDispatcher(BaseHandler):\n def get(self):\n \"\"\" cron task \"\"\"\n self._categorize()\n \n def _categorize(self):\n \"\"\" categorize wine info \"\"\"\n entities = WebLinkPornTemp.query().fetch(50) # to avoid running datastore free quota limit\n for entity in entities:\n result = re.findall(r\"video\\d+|redtube\\.com\\d+|videos\\d+|watch\\d+|viewkey=\\d+\", entity.link, re.I) # sku ; BuyWine/Item ; bwe\n query = WebLinkPorn.query(WebLinkPorn.link == entity.link)\n if result and query.count() == 0:\n new_wine_info = WebLinkPorn()\n new_wine_info.link = entity.link\n new_wine_info.title = entity.title\n new_wine_info.put()\n\n\nclass TaskCrawlTagInfo(BaseHandler):\n def get(self):\n base_url = 'http://www.xvideos.com/tags/'\n req = urllib2.Request(base_url)\n response = urllib2.urlopen(req) # need to add new mechanism to prevent fetch javascript\n searched_page = response.read()\n soup = BeautifulSoup(searched_page)\n \n for found_link in soup.find_all('a'):\n try:\n if found_link.get('href'):\n match_group = re.match(\"/tags/.*\", found_link.get('href'), re.I)\n \n if match_group:\n tag_name = found_link.get('href')[found_link.get('href').rfind('/') + 1:]\n tag_number = str(found_link.nextSibling).strip()\n tag_info = Tag( site = 'Xvideos',\n name = tag_name,\n number = tag_number,\n created_datetime = datetime.now())\n \n tag_info.put()\n except:\n pass\n\n# configuration\nconfig = dict_general.config_setting\n\n# app\napp = webapp2.WSGIApplication([\n webapp2.Route(r'/cron_tasks/crawl_root_links', TaskCrawlRootLinksDispatcher, name = 'crawl_root_links'),\n webapp2.Route(r'/cron_tasks/crawl_temp_links', TaskCrawlTempLinksDispatcher, name = 'crawl_temp_links'),\n webapp2.Route(r'/cron_tasks/categorize_porn_info', TaskCategorizePornInfoDispatcher, name = \"categorize_wine_info\"),\n webapp2.Route(r'/cron_tasks/crawl_tag_info', TaskCrawlTagInfo, name = 'crawl_tag_info')\n], debug=True, config=config)\n\n# log\nlogging.getLogger().setLevel(logging.DEBUG)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python3
#start up curses
import curses
HEIGHT = 24
WIDTH = 80
TESTING = True
curses.initscr()
stdscr = curses.newwin(HEIGHT, WIDTH, 0, 0)
curses.noecho() #don't echo keys
stdscr.keypad(1)
#function for displaying other players decision
#statement is the number of the other player's death funciton returned
#player is the other player's name
#returns 0 if the other player chose to play again but this player doesn't
#want to; returns 1 if this player also wants to play again
#returns -1 if the other player chose to quit to the main menu
def decision(statement, player):
stdscr.clear()
stdscr.border(0)
stdscr.timeout(-1)
decision = "play again" if statement == 1 else "return to main menu"
stdscr.addstr(3, 5, "Your Partner has decided to " + decision)
if statement == 1:
stdscr.addstr(5, 10, "Do you want to play again?")
stdscr.addstr(7, 10, "Press y for yes and n for no")
stdscr.refresh()
while True:
choice = stdscr.getch()
if choice == 110: #choice is n
return 0
elif choice == 121: #choice is y
return 1
elif statement == 0:
stdscr.addstr(5, 5, "You will be taken back to the main menu.")
return -1
#function for the death screen shown when this player dies
#takes other player's name
#returns 0 if player wants to return to main menu
#returns 1 if player wants to play again
#returns -1 if while loop is exited (which shouldn't happen)
def death(player):
stdscr.clear()
stdscr.border(0)
stdscr.timeout(100)
stdscr.addstr(3, 5, "You have died. What do you want to do?")
stdscr.addstr(5, 10, "Play Again - Press p")
stdscr.addstr(7, 10, "Return to Main Menu - Press r")
stdscr.refresh()
while True:
#if other player already made a decision
# statement = other player's decision
# choice = decision(statement, player)
# if statement == 1:
# send choice to other player
# if choice == 0 or choice == -1:
# return 0
# elif choice == 1:
# return 1
choice = stdscr.getch()
#send choice to other player
if choice == 114: #choice is r
return 0
elif choice == 112: #choice is p
#choice = get decision back from other player
#if choice == 1:
#print message saying other player agrees to play again
return 1
#elif choice == 0
#print message saying other player quit to main menu
#return 0
return -1
#function for the screen for joining an existing game as player 2
#returns 0 if player wants to return to main menu
#returns 1 if a 1st player is chosen
#returns -1 if while loop is exited (which shouldn't happen)
def join():
stdscr.clear()
stdscr.border(0)
stdscr.timeout(-1)
stdscr.addstr(3, 5, "Pick a player to join")
#get list of available players from the server
#loop through them all and display them (maybe only the first 10)
#make a counter for the addstr y value and increment by 2 each loop
stdscr.addstr(5, 10, "Return to Main Menu - Press r")
stdscr.refresh()
while True:
choice = stdscr.getch()
if choice == 114: #choice is r
return 0
#elif check if a first player has been chosen
# send this player's name to first player
# get back first player's name
# return 1
return -1
#function for waiting screen for starting a game as player 1
#returns 0 if player wants to return to main menu
#returns 1 if a second player is chosen
#returns -1 if while loop is exited (which shouldn't happen)
def start():
stdscr.clear()
stdscr.border(0)
stdscr.timeout(100)
stdscr.addstr(3, 5, "Waiting for 2nd player")
stdscr.addstr(5, 10, "Return to Main Menu - Press r")
stdscr.refresh()
while True:
choice = stdscr.getch()
if choice == 114: #choice is r
return 0
#elif check if a second player has been chosen
# get second player's name
# send this player's name
# return 1
return -1
def pause():
stdscr.clear()
stdscr.border(0)
stdscr.timeout(-1)
stdscr.addstr(3, 5, "Paused. What do you want to do?")
stdscr.addstr(5, 10, "Continue - Press c")
stdscr.addstr(7, 10, "Swap Controls - Press s")
stdscr.addstr(9, 10, "End Game - Press e")
stdscr.refresh()
while True:
choice = stdscr.getch()
if choice == 99: #choice is c
return 1
elif choice == 115: #choice is s
return 0
elif choice == 101: #choice is e
return -1
return 1
def menu(name):
stdscr.clear()
stdscr.border(0)
stdscr.timeout(-1)
stdscr.addstr(3, 5, name + ", what do you want to do?")
stdscr.addstr(5, 10, "Play new game - Press 1")
stdscr.addstr(7, 10, "Exit - Press 4")
stdscr.refresh()
choice = stdscr.getch()
stdscr.clear()
stdscr.border(0)
if choice == 49: #choice is 1
return 1
elif choice == 52: #choice is 4
return 0
return 1
play = menu("HOPPY")
c = 1
x = 25
y = 12
player = 0
while play:
if TESTING:
stdscr.clear()
stdscr.border(0)
stdscr.addstr(y, x, str(c))
stdscr.timeout(100)
button = stdscr.getch()
if button != -1:
if button == curses.KEY_RIGHT and player == 0:
x += 1
if x >= WIDTH - 1:
x -= 1
elif button == curses.KEY_LEFT and player == 0:
x -= 1
if x <= 0:
x += 1
elif button == curses.KEY_UP and player == 1:
y -= 1
if y <= 0:
y += 1
elif button == curses.KEY_DOWN and player == 1:
y += 1
if y >= HEIGHT - 1:
y -= 1
if button == 112: #button is p
cont = pause()
if cont == -1:
c = 1
player = 0
play = menu("HOPPY")
elif cont == 0:
player = (player + 1) % 2
if TESTING:
c += 1
stdscr.keypad(0)
curses.echo()
curses.endwin()
#curses.wrapper([function]) sets up and exits curses for you; function is the
    #code that runs in curses
#initialize curses
#curses.noecho() #don't echo keys
#curses.cbreak() or curses.raw() #react instantly to keys, raw doesn't ignore
#CTRL-Z(suspend) and CTRL-C(exit)
#stdscr.keypad(1) #read navigation key sequences for me
#deinitialize curses
#curses.nocbreak(); stdscr.keypad(0); curses.echo()
#exit curses
#curses.endwin()
|
normal
|
{
"blob_id": "a6f03340c2f60c061977fed6807703cdaeb1b7fd",
"index": 7976,
"step-1": "<mask token>\n\n\ndef decision(statement, player):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n decision = 'play again' if statement == 1 else 'return to main menu'\n stdscr.addstr(3, 5, 'Your Partner has decided to ' + decision)\n if statement == 1:\n stdscr.addstr(5, 10, 'Do you want to play again?')\n stdscr.addstr(7, 10, 'Press y for yes and n for no')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 110:\n return 0\n elif choice == 121:\n return 1\n elif statement == 0:\n stdscr.addstr(5, 5, 'You will be taken back to the main menu.')\n return -1\n\n\ndef death(player):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(100)\n stdscr.addstr(3, 5, 'You have died. What do you want to do?')\n stdscr.addstr(5, 10, 'Play Again - Press p')\n stdscr.addstr(7, 10, 'Return to Main Menu - Press r')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 114:\n return 0\n elif choice == 112:\n return 1\n return -1\n\n\n<mask token>\n\n\ndef start():\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(100)\n stdscr.addstr(3, 5, 'Waiting for 2nd player')\n stdscr.addstr(5, 10, 'Return to Main Menu - Press r')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 114:\n return 0\n return -1\n\n\n<mask token>\n\n\ndef menu(name):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n stdscr.addstr(3, 5, name + ', what do you want to do?')\n stdscr.addstr(5, 10, 'Play new game - Press 1')\n stdscr.addstr(7, 10, 'Exit - Press 4')\n stdscr.refresh()\n choice = stdscr.getch()\n stdscr.clear()\n stdscr.border(0)\n if choice == 49:\n return 1\n elif choice == 52:\n return 0\n return 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef decision(statement, player):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n decision = 'play again' if statement == 1 else 'return to main menu'\n stdscr.addstr(3, 5, 'Your Partner has decided to ' + decision)\n if statement == 1:\n stdscr.addstr(5, 10, 'Do you want to play again?')\n stdscr.addstr(7, 10, 'Press y for yes and n for no')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 110:\n return 0\n elif choice == 121:\n return 1\n elif statement == 0:\n stdscr.addstr(5, 5, 'You will be taken back to the main menu.')\n return -1\n\n\ndef death(player):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(100)\n stdscr.addstr(3, 5, 'You have died. What do you want to do?')\n stdscr.addstr(5, 10, 'Play Again - Press p')\n stdscr.addstr(7, 10, 'Return to Main Menu - Press r')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 114:\n return 0\n elif choice == 112:\n return 1\n return -1\n\n\ndef join():\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n stdscr.addstr(3, 5, 'Pick a player to join')\n stdscr.addstr(5, 10, 'Return to Main Menu - Press r')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 114:\n return 0\n return -1\n\n\ndef start():\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(100)\n stdscr.addstr(3, 5, 'Waiting for 2nd player')\n stdscr.addstr(5, 10, 'Return to Main Menu - Press r')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 114:\n return 0\n return -1\n\n\n<mask token>\n\n\ndef menu(name):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n stdscr.addstr(3, 5, name + ', what do you want to do?')\n stdscr.addstr(5, 10, 'Play new game - Press 1')\n stdscr.addstr(7, 10, 'Exit - Press 4')\n stdscr.refresh()\n choice = stdscr.getch()\n stdscr.clear()\n stdscr.border(0)\n if choice == 49:\n return 1\n elif choice == 52:\n return 0\n return 1\n\n\n<mask token>\n",
"step-3": "<mask token>\nHEIGHT = 24\nWIDTH = 80\nTESTING = True\ncurses.initscr()\nstdscr = curses.newwin(HEIGHT, WIDTH, 0, 0)\ncurses.noecho()\nstdscr.keypad(1)\n\n\ndef decision(statement, player):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n decision = 'play again' if statement == 1 else 'return to main menu'\n stdscr.addstr(3, 5, 'Your Partner has decided to ' + decision)\n if statement == 1:\n stdscr.addstr(5, 10, 'Do you want to play again?')\n stdscr.addstr(7, 10, 'Press y for yes and n for no')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 110:\n return 0\n elif choice == 121:\n return 1\n elif statement == 0:\n stdscr.addstr(5, 5, 'You will be taken back to the main menu.')\n return -1\n\n\ndef death(player):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(100)\n stdscr.addstr(3, 5, 'You have died. What do you want to do?')\n stdscr.addstr(5, 10, 'Play Again - Press p')\n stdscr.addstr(7, 10, 'Return to Main Menu - Press r')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 114:\n return 0\n elif choice == 112:\n return 1\n return -1\n\n\ndef join():\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n stdscr.addstr(3, 5, 'Pick a player to join')\n stdscr.addstr(5, 10, 'Return to Main Menu - Press r')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 114:\n return 0\n return -1\n\n\ndef start():\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(100)\n stdscr.addstr(3, 5, 'Waiting for 2nd player')\n stdscr.addstr(5, 10, 'Return to Main Menu - Press r')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 114:\n return 0\n return -1\n\n\ndef pause():\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n stdscr.addstr(3, 5, 'Paused. What do you want to do?')\n stdscr.addstr(5, 10, 'Continue - Press c')\n stdscr.addstr(7, 10, 'Swap Controls - Press s')\n stdscr.addstr(9, 10, 'End Game - Press e')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 99:\n return 1\n elif choice == 115:\n return 0\n elif choice == 101:\n return -1\n return 1\n\n\ndef menu(name):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n stdscr.addstr(3, 5, name + ', what do you want to do?')\n stdscr.addstr(5, 10, 'Play new game - Press 1')\n stdscr.addstr(7, 10, 'Exit - Press 4')\n stdscr.refresh()\n choice = stdscr.getch()\n stdscr.clear()\n stdscr.border(0)\n if choice == 49:\n return 1\n elif choice == 52:\n return 0\n return 1\n\n\nplay = menu('HOPPY')\nc = 1\nx = 25\ny = 12\nplayer = 0\nwhile play:\n if TESTING:\n stdscr.clear()\n stdscr.border(0)\n stdscr.addstr(y, x, str(c))\n stdscr.timeout(100)\n button = stdscr.getch()\n if button != -1:\n if button == curses.KEY_RIGHT and player == 0:\n x += 1\n if x >= WIDTH - 1:\n x -= 1\n elif button == curses.KEY_LEFT and player == 0:\n x -= 1\n if x <= 0:\n x += 1\n elif button == curses.KEY_UP and player == 1:\n y -= 1\n if y <= 0:\n y += 1\n elif button == curses.KEY_DOWN and player == 1:\n y += 1\n if y >= HEIGHT - 1:\n y -= 1\n if button == 112:\n cont = pause()\n if cont == -1:\n c = 1\n player = 0\n play = menu('HOPPY')\n elif cont == 0:\n player = (player + 1) % 2\n if TESTING:\n c += 1\nstdscr.keypad(0)\ncurses.echo()\ncurses.endwin()\n",
"step-4": "import curses\nHEIGHT = 24\nWIDTH = 80\nTESTING = True\ncurses.initscr()\nstdscr = curses.newwin(HEIGHT, WIDTH, 0, 0)\ncurses.noecho()\nstdscr.keypad(1)\n\n\ndef decision(statement, player):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n decision = 'play again' if statement == 1 else 'return to main menu'\n stdscr.addstr(3, 5, 'Your Partner has decided to ' + decision)\n if statement == 1:\n stdscr.addstr(5, 10, 'Do you want to play again?')\n stdscr.addstr(7, 10, 'Press y for yes and n for no')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 110:\n return 0\n elif choice == 121:\n return 1\n elif statement == 0:\n stdscr.addstr(5, 5, 'You will be taken back to the main menu.')\n return -1\n\n\ndef death(player):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(100)\n stdscr.addstr(3, 5, 'You have died. What do you want to do?')\n stdscr.addstr(5, 10, 'Play Again - Press p')\n stdscr.addstr(7, 10, 'Return to Main Menu - Press r')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 114:\n return 0\n elif choice == 112:\n return 1\n return -1\n\n\ndef join():\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n stdscr.addstr(3, 5, 'Pick a player to join')\n stdscr.addstr(5, 10, 'Return to Main Menu - Press r')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 114:\n return 0\n return -1\n\n\ndef start():\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(100)\n stdscr.addstr(3, 5, 'Waiting for 2nd player')\n stdscr.addstr(5, 10, 'Return to Main Menu - Press r')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 114:\n return 0\n return -1\n\n\ndef pause():\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n stdscr.addstr(3, 5, 'Paused. What do you want to do?')\n stdscr.addstr(5, 10, 'Continue - Press c')\n stdscr.addstr(7, 10, 'Swap Controls - Press s')\n stdscr.addstr(9, 10, 'End Game - Press e')\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 99:\n return 1\n elif choice == 115:\n return 0\n elif choice == 101:\n return -1\n return 1\n\n\ndef menu(name):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n stdscr.addstr(3, 5, name + ', what do you want to do?')\n stdscr.addstr(5, 10, 'Play new game - Press 1')\n stdscr.addstr(7, 10, 'Exit - Press 4')\n stdscr.refresh()\n choice = stdscr.getch()\n stdscr.clear()\n stdscr.border(0)\n if choice == 49:\n return 1\n elif choice == 52:\n return 0\n return 1\n\n\nplay = menu('HOPPY')\nc = 1\nx = 25\ny = 12\nplayer = 0\nwhile play:\n if TESTING:\n stdscr.clear()\n stdscr.border(0)\n stdscr.addstr(y, x, str(c))\n stdscr.timeout(100)\n button = stdscr.getch()\n if button != -1:\n if button == curses.KEY_RIGHT and player == 0:\n x += 1\n if x >= WIDTH - 1:\n x -= 1\n elif button == curses.KEY_LEFT and player == 0:\n x -= 1\n if x <= 0:\n x += 1\n elif button == curses.KEY_UP and player == 1:\n y -= 1\n if y <= 0:\n y += 1\n elif button == curses.KEY_DOWN and player == 1:\n y += 1\n if y >= HEIGHT - 1:\n y -= 1\n if button == 112:\n cont = pause()\n if cont == -1:\n c = 1\n player = 0\n play = menu('HOPPY')\n elif cont == 0:\n player = (player + 1) % 2\n if TESTING:\n c += 1\nstdscr.keypad(0)\ncurses.echo()\ncurses.endwin()\n",
"step-5": "#!/usr/bin/python3\n\n#start up curses\nimport curses\n\nHEIGHT = 24\nWIDTH = 80\nTESTING = True\n\ncurses.initscr()\nstdscr = curses.newwin(HEIGHT, WIDTH, 0, 0)\ncurses.noecho() #don't echo keys\nstdscr.keypad(1)\n\n#function for displaying other players decision\n#statement is the number of the other player's death funciton returned\n#player is the other player's name\n#returns 0 if other player choose to play again and this player doesn't\n#want to, if player does want to then returns 1\n#returns -1 if other player choose to quit to main menu\ndef decision(statement, player):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n decision = \"play again\" if statement == 1 else \"return to main menu\"\n stdscr.addstr(3, 5, \"Your Partner has decided to \" + decision)\n if statement == 1:\n stdscr.addstr(5, 10, \"Do you want to play again?\")\n stdscr.addstr(7, 10, \"Press y for yes and n for no\")\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 110: #choice is n\n return 0\n elif choice == 121: #choice is y\n return 1\n elif statement == 0:\n stdscr.addstr(5, 5, \"You will be taken back to the main menu.\")\n return -1\n\n#funciton for waiting screen for starting a game as player 1\n#takes other player's name\n#returns 0 if player wants to return to main menu\n#returns 1 if player wants to play again\n#returns -1 if while loop is exited (which shouldn't happen)\ndef death(player):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(100)\n stdscr.addstr(3, 5, \"You have died. What do you want to do?\")\n stdscr.addstr(5, 10, \"Play Again - Press p\")\n stdscr.addstr(7, 10, \"Return to Main Menu - Press r\")\n stdscr.refresh()\n while True:\n #if other player already made a decision\n # statement = other player's decision\n # choice = decision(statement, player)\n # if statement == 1:\n # send choice to other player\n # if choice == 0 or choice == -1:\n # return 0\n # elif choice == 1:\n # return 1\n choice = stdscr.getch()\n #send choice to other player\n if choice == 114: #choice is r\n return 0\n elif choice == 112: #choice is p\n #choice = get decision back from other player\n #if choice == 1:\n #print message saying other player agrees to play again\n return 1\n #elif choice == 0\n #print message saying other player quit to main menu\n #return 0\n\n return -1\n\n#funciton for waiting screen for starting a game as player 1\n#returns 0 if player wants to return to main menu\n#returns 1 if a 1st player is chosen\n#returns -1 if while loop is exited (which shouldn't happen)\ndef join():\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n stdscr.addstr(3, 5, \"Pick a player to join\")\n #get list of available players from the server\n #loop through them all and display them (maybe only the first 10)\n #make a counter for the addstr y value and increment by 2 each loop\n stdscr.addstr(5, 10, \"Return to Main Menu - Press r\")\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n\n if choice == 114: #choice is r\n return 0\n #elif check if a first player has been chosen\n # send this player's name to first player\n # get back first player's name\n # return 1\n\n return -1\n\n#funciton for waiting screen for starting a game as player 1\n#returns 0 if player wants to return to main menu\n#returns 1 if a second player is chosen\n#returns -1 if while loop is exited (which shouldn't happen)\ndef start():\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(100)\n stdscr.addstr(3, 5, \"Waiting for 2nd player\")\n stdscr.addstr(5, 10, 
\"Return to Main Menu - Press r\")\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n\n if choice == 114: #choice is r\n return 0\n #elif check if a second player has been chosen\n # get second player's name\n # send this player's name\n # return 1\n\n return -1\n\ndef pause():\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n stdscr.addstr(3, 5, \"Paused. What do you want to do?\")\n stdscr.addstr(5, 10, \"Continue - Press c\")\n stdscr.addstr(7, 10, \"Swap Controls - Press s\")\n stdscr.addstr(9, 10, \"End Game - Press e\")\n stdscr.refresh()\n while True:\n choice = stdscr.getch()\n if choice == 99: #choice is c\n return 1\n elif choice == 115: #choice is s\n return 0\n elif choice == 101: #choice is e\n return -1\n \n return 1\n\ndef menu(name):\n stdscr.clear()\n stdscr.border(0)\n stdscr.timeout(-1)\n stdscr.addstr(3, 5, name + \", what do you want to do?\")\n stdscr.addstr(5, 10, \"Play new game - Press 1\")\n stdscr.addstr(7, 10, \"Exit - Press 4\")\n stdscr.refresh()\n choice = stdscr.getch()\n\n stdscr.clear()\n stdscr.border(0)\n if choice == 49: #choice is 1\n return 1\n elif choice == 52: #choice is 4\n return 0\n \n return 1\n\nplay = menu(\"HOPPY\")\nc = 1\nx = 25\ny = 12\nplayer = 0\nwhile play:\n if TESTING:\n stdscr.clear()\n stdscr.border(0)\n stdscr.addstr(y, x, str(c))\n\n stdscr.timeout(100)\n button = stdscr.getch()\n if button != -1:\n if button == curses.KEY_RIGHT and player == 0:\n x += 1\n if x >= WIDTH - 1:\n x -= 1\n elif button == curses.KEY_LEFT and player == 0:\n x -= 1\n if x <= 0:\n x += 1\n elif button == curses.KEY_UP and player == 1:\n y -= 1\n if y <= 0:\n y += 1\n elif button == curses.KEY_DOWN and player == 1:\n y += 1\n if y >= HEIGHT - 1:\n y -= 1\n if button == 112: #button is p\n cont = pause()\n if cont == -1:\n c = 1\n player = 0\n play = menu(\"HOPPY\")\n elif cont == 0:\n player = (player + 1) % 2\n if TESTING:\n c += 1\nstdscr.keypad(0)\ncurses.echo()\ncurses.endwin()\n\n\n\n\n\n\n#curse.wrapper([funciton]) sets up and exits curses for you, function is the\n #code the runs in curses\n\n#initialize curses\n#curses.noecho() #don't echo keys\n#curses.cbreak() or curses.raw() #react instantly to keys, raw doesn't ignore\n #CTRL-Z(suspend) and CTRL-C(exit)\n#stdscr.keypad(1) #read navigation key sequences for me\n\n\n#deinitialize curses\n#curses.nocbreak(); stdscr.keypad(0); curses.echo()\n\n#exit curses\n#curses.endwin()\n",
"step-ids": [
4,
5,
8,
9,
10
]
}
|
[
4,
5,
8,
9,
10
] |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Country, TouristPlaces, Users
# Connect to the database and create a session shortcut for easier updates
engine = create_engine('sqlite:///country_catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Creating a user
user_1 = Users(name="admin", email="[email protected]")
session.add(user_1)
session.commit()
# India
country_1 = Country(user_id=1, name="India")
session.add(country_1)
session.commit()
# Australia
country_2 = Country(user_id=1, name="Australia")
session.add(country_2)
session.commit()
# England
country_3 = Country(user_id=1, name="England")
session.add(country_3)
session.commit()
# Paris
country_4 = Country(user_id=1, name="Paris")
session.add(country_4)
session.commit()
# USA
country_5 = Country(user_id=1, name="USA")
session.add(country_5)
session.commit()
# Mexico
country_6 = Country(user_id=1, name="Mexico")
session.add(country_6)
session.commit()
# SriLanka
country_7 = Country(user_id=1, name="Srilanka")
session.add(country_7)
session.commit()
# Maldives
country_8 = Country(user_id=1, name="Maldives")
session.add(country_8)
session.commit()
# Adding tourist attractions to countries
places = TouristPlaces(user_id=1, name="Taj Mahal",
description="Taj Mahal is mausolem by Mughal ruler Shah Jahan for his Wife Mumtaz Mahal "
"It is bultby using white marbel",
country=country_1)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Red Fort",
description="Red fort is the histroric fort in the city of Delhi,India."
"It is the main residence of the emperors of mughal Dynasty.",
country=country_1)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Canberra",
description="It is the home for National GAllery of Australia"
"and a wide varierty of cultural and historic sites",
country=country_2)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Perth",
description="The west side ofAustralia is home to the city of Perth"
"It is bordered by Indian Ocean",
country=country_2)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Tower Of London",
description="It is one of the world Heritage site"
"Other highlights are Crown Jewels Exhibition",
country=country_3)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="British Museum",
description="It contains the collection of worlds finest antiquites"
"The famous artifacts are Eglin marbles",
country=country_3)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Eiffel Tower",
description="The Eiffel-tower is wrought iron lattice"
"It is named after the Engineer Gustav Eiffel",
country=country_4)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="places of Versallies",
description="The Palce of Versallies is the Principle Royal"
"residence.",
country=country_4)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Grand Canyon Village",
description="Grand Canyon is located in south Rim of Grand Canyon"
"It is focussed on accomadating tourists visiting Grand Canyon",
country=country_5)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Statue if Liberty",
description="Statue of Liberty is Colossal neo-classical sculpture"
"In New-york Hourbor Newyork",
country=country_5)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Mexico City",
description="Mexico city is densely populated and high altitude capital Of Mexico"
"It is the home for zoo,Muesuem of modern Art.",
country=country_6)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Tulum",
description="Tulum is a town in the Carribean coatline of Mexico"
"It is well-known for beaches and ruins of Ancient Mayan port city",
country=country_6)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Colombo",
description="It is the Capital city of Srilanka"
"It sheritage is reflected in its Architecture",
country=country_7)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Kandy",
description="Kandy is the largest city of central Sri Lanka."
"It is surrounded by mountains which is home to tea Plantations.",
country=country_7)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Male",
description="It is among the tooped tourist Attractions of Maldives"
"It has considerably moderate tempaerature through out the year",
country=country_8)
session.add(places)
session.commit()
places = TouristPlaces(user_id=1, name="Sun Island",
description="It is adorned with some sparkling beaches"
"beuatigul flowers and lavish greenary that pulls a great number of tourists",
country=country_8)
session.add(places)
session.commit()
print("added countries and Tourist Places added")
|
normal
|
{
"blob_id": "21b9844fce10d16a14050a782ce7e15e3f6fb657",
"index": 5737,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsession.add(user_1)\nsession.commit()\n<mask token>\nsession.add(country_1)\nsession.commit()\n<mask token>\nsession.add(country_2)\nsession.commit()\n<mask token>\nsession.add(country_3)\nsession.commit()\n<mask token>\nsession.add(country_4)\nsession.commit()\n<mask token>\nsession.add(country_5)\nsession.commit()\n<mask token>\nsession.add(country_6)\nsession.commit()\n<mask token>\nsession.add(country_7)\nsession.commit()\n<mask token>\nsession.add(country_8)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\n<mask token>\nsession.add(places)\nsession.commit()\nprint('added countries and Tourist Places added')\n",
"step-3": "<mask token>\nengine = create_engine('sqlite:///country_catalog.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\nuser_1 = Users(name='admin', email='[email protected]')\nsession.add(user_1)\nsession.commit()\ncountry_1 = Country(user_id=1, name='India')\nsession.add(country_1)\nsession.commit()\ncountry_2 = Country(user_id=1, name='Australia')\nsession.add(country_2)\nsession.commit()\ncountry_3 = Country(user_id=1, name='England')\nsession.add(country_3)\nsession.commit()\ncountry_4 = Country(user_id=1, name='Paris')\nsession.add(country_4)\nsession.commit()\ncountry_5 = Country(user_id=1, name='USA')\nsession.add(country_5)\nsession.commit()\ncountry_6 = Country(user_id=1, name='Mexico')\nsession.add(country_6)\nsession.commit()\ncountry_7 = Country(user_id=1, name='Srilanka')\nsession.add(country_7)\nsession.commit()\ncountry_8 = Country(user_id=1, name='Maldives')\nsession.add(country_8)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Taj Mahal', description=\n 'Taj Mahal is mausolem by Mughal ruler Shah Jahan for his Wife Mumtaz Mahal It is bultby using white marbel'\n , country=country_1)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Red Fort', description=\n 'Red fort is the histroric fort in the city of Delhi,India.It is the main residence of the emperors of mughal Dynasty.'\n , country=country_1)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Canberra', description=\n 'It is the home for National GAllery of Australiaand a wide varierty of cultural and historic sites'\n , country=country_2)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Perth', description=\n 'The west side ofAustralia is home to the city of PerthIt is bordered by Indian Ocean'\n , country=country_2)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Tower Of London', description=\n 'It is one of the world Heritage siteOther highlights are Crown Jewels Exhibition'\n , country=country_3)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='British Museum', description=\n 'It contains the collection of worlds finest antiquitesThe famous artifacts are Eglin marbles'\n , country=country_3)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Eiffel Tower', description=\n 'The Eiffel-tower is wrought iron latticeIt is named after the Engineer Gustav Eiffel'\n , country=country_4)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='places of Versallies', description=\n 'The Palce of Versallies is the Principle Royalresidence.', country=\n country_4)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Grand Canyon Village', description=\n 'Grand Canyon is located in south Rim of Grand CanyonIt is focussed on accomadating tourists visiting Grand Canyon'\n , country=country_5)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Statue if Liberty', description=\n 'Statue of Liberty is Colossal neo-classical sculptureIn New-york Hourbor Newyork'\n , country=country_5)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Mexico City', description=\n 'Mexico city is densely populated and high altitude capital Of MexicoIt is the home for zoo,Muesuem of modern Art.'\n , country=country_6)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, 
name='Tulum', description=\n 'Tulum is a town in the Carribean coatline of MexicoIt is well-known for beaches and ruins of Ancient Mayan port city'\n , country=country_6)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Colombo', description=\n 'It is the Capital city of SrilankaIt sheritage is reflected in its Architecture'\n , country=country_7)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Kandy', description=\n 'Kandy is the largest city of central Sri Lanka.It is surrounded by mountains which is home to tea Plantations.'\n , country=country_7)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Male', description=\n 'It is among the tooped tourist Attractions of MaldivesIt has considerably moderate tempaerature through out the year'\n , country=country_8)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Sun Island', description=\n 'It is adorned with some sparkling beachesbeuatigul flowers and lavish greenary that pulls a great number of tourists'\n , country=country_8)\nsession.add(places)\nsession.commit()\nprint('added countries and Tourist Places added')\n",
"step-4": "from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Country, TouristPlaces, Users\nengine = create_engine('sqlite:///country_catalog.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\nuser_1 = Users(name='admin', email='[email protected]')\nsession.add(user_1)\nsession.commit()\ncountry_1 = Country(user_id=1, name='India')\nsession.add(country_1)\nsession.commit()\ncountry_2 = Country(user_id=1, name='Australia')\nsession.add(country_2)\nsession.commit()\ncountry_3 = Country(user_id=1, name='England')\nsession.add(country_3)\nsession.commit()\ncountry_4 = Country(user_id=1, name='Paris')\nsession.add(country_4)\nsession.commit()\ncountry_5 = Country(user_id=1, name='USA')\nsession.add(country_5)\nsession.commit()\ncountry_6 = Country(user_id=1, name='Mexico')\nsession.add(country_6)\nsession.commit()\ncountry_7 = Country(user_id=1, name='Srilanka')\nsession.add(country_7)\nsession.commit()\ncountry_8 = Country(user_id=1, name='Maldives')\nsession.add(country_8)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Taj Mahal', description=\n 'Taj Mahal is mausolem by Mughal ruler Shah Jahan for his Wife Mumtaz Mahal It is bultby using white marbel'\n , country=country_1)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Red Fort', description=\n 'Red fort is the histroric fort in the city of Delhi,India.It is the main residence of the emperors of mughal Dynasty.'\n , country=country_1)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Canberra', description=\n 'It is the home for National GAllery of Australiaand a wide varierty of cultural and historic sites'\n , country=country_2)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Perth', description=\n 'The west side ofAustralia is home to the city of PerthIt is bordered by Indian Ocean'\n , country=country_2)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Tower Of London', description=\n 'It is one of the world Heritage siteOther highlights are Crown Jewels Exhibition'\n , country=country_3)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='British Museum', description=\n 'It contains the collection of worlds finest antiquitesThe famous artifacts are Eglin marbles'\n , country=country_3)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Eiffel Tower', description=\n 'The Eiffel-tower is wrought iron latticeIt is named after the Engineer Gustav Eiffel'\n , country=country_4)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='places of Versallies', description=\n 'The Palce of Versallies is the Principle Royalresidence.', country=\n country_4)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Grand Canyon Village', description=\n 'Grand Canyon is located in south Rim of Grand CanyonIt is focussed on accomadating tourists visiting Grand Canyon'\n , country=country_5)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Statue if Liberty', description=\n 'Statue of Liberty is Colossal neo-classical sculptureIn New-york Hourbor Newyork'\n , country=country_5)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Mexico City', description=\n 'Mexico city is densely populated and high altitude capital Of MexicoIt is the home for 
zoo,Muesuem of modern Art.'\n , country=country_6)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Tulum', description=\n 'Tulum is a town in the Carribean coatline of MexicoIt is well-known for beaches and ruins of Ancient Mayan port city'\n , country=country_6)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Colombo', description=\n 'It is the Capital city of SrilankaIt sheritage is reflected in its Architecture'\n , country=country_7)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Kandy', description=\n 'Kandy is the largest city of central Sri Lanka.It is surrounded by mountains which is home to tea Plantations.'\n , country=country_7)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Male', description=\n 'It is among the tooped tourist Attractions of MaldivesIt has considerably moderate tempaerature through out the year'\n , country=country_8)\nsession.add(places)\nsession.commit()\nplaces = TouristPlaces(user_id=1, name='Sun Island', description=\n 'It is adorned with some sparkling beachesbeuatigul flowers and lavish greenary that pulls a great number of tourists'\n , country=country_8)\nsession.add(places)\nsession.commit()\nprint('added countries and Tourist Places added')\n",
"step-5": "from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Country, TouristPlaces, Users\n\n# Create database and create a shortcut for easier to update database\nengine = create_engine('sqlite:///country_catalog.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n# Creating an user\nuser_1 = Users(name=\"admin\", email=\"[email protected]\")\nsession.add(user_1)\nsession.commit()\n\n# India\ncountry_1 = Country(user_id=1, name=\"India\")\nsession.add(country_1)\nsession.commit()\n\n\n# Australia\ncountry_2 = Country(user_id=1, name=\"Australia\")\nsession.add(country_2)\nsession.commit()\n\n# England\ncountry_3 = Country(user_id=1, name=\"England\")\nsession.add(country_3)\nsession.commit()\n\n# Paris\ncountry_4 = Country(user_id=1, name=\"Paris\")\nsession.add(country_4)\nsession.commit()\n\n# USA\ncountry_5 = Country(user_id=1, name=\"USA\")\nsession.add(country_5)\nsession.commit()\n\n# Mexico\ncountry_6 = Country(user_id=1, name=\"Mexico\")\nsession.add(country_6)\nsession.commit()\n\n# SriLanka\ncountry_7 = Country(user_id=1, name=\"Srilanka\")\nsession.add(country_7)\nsession.commit()\n\n# MAldives\ncountry_8 = Country(user_id=1, name=\"Maldives\")\nsession.add(country_8)\nsession.commit()\n\n# Adding touristAttractions to Countries\nplaces = TouristPlaces(user_id=1, name=\"Taj Mahal\",\n description=\"Taj Mahal is mausolem by Mughal ruler Shah Jahan for his Wife Mumtaz Mahal \"\n \"It is bultby using white marbel\",\n country=country_1)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"Red Fort\",\n description=\"Red fort is the histroric fort in the city of Delhi,India.\"\n \"It is the main residence of the emperors of mughal Dynasty.\",\n country=country_1)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"Canberra\",\n description=\"It is the home for National GAllery of Australia\"\n \"and a wide varierty of cultural and historic sites\",\n country=country_2)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"Perth\",\n description=\"The west side ofAustralia is home to the city of Perth\"\n \"It is bordered by Indian Ocean\",\n country=country_2)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"Tower Of London\",\n description=\"It is one of the world Heritage site\"\n \"Other highlights are Crown Jewels Exhibition\",\n country=country_3)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"British Museum\",\n description=\"It contains the collection of worlds finest antiquites\"\n \"The famous artifacts are Eglin marbles\",\n country=country_3)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"Eiffel Tower\",\n description=\"The Eiffel-tower is wrought iron lattice\"\n \"It is named after the Engineer Gustav Eiffel\",\n country=country_4)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"places of Versallies\",\n description=\"The Palce of Versallies is the Principle Royal\"\n \"residence.\",\n country=country_4)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"Grand Canyon Village\",\n description=\"Grand Canyon is located in south Rim of Grand Canyon\"\n \"It is focussed on accomadating tourists visiting Grand Canyon\",\n country=country_5)\nsession.add(places)\nsession.commit()\n\nplaces = 
TouristPlaces(user_id=1, name=\"Statue if Liberty\",\n description=\"Statue of Liberty is Colossal neo-classical sculpture\"\n \"In New-york Hourbor Newyork\",\n country=country_5)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"Mexico City\",\n description=\"Mexico city is densely populated and high altitude capital Of Mexico\"\n \"It is the home for zoo,Muesuem of modern Art.\",\n country=country_6)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"Tulum\",\n description=\"Tulum is a town in the Carribean coatline of Mexico\"\n \"It is well-known for beaches and ruins of Ancient Mayan port city\",\n country=country_6)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"Colombo\",\n description=\"It is the Capital city of Srilanka\"\n \"It sheritage is reflected in its Architecture\",\n country=country_7)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"Kandy\",\n description=\"Kandy is the largest city of central Sri Lanka.\"\n \"It is surrounded by mountains which is home to tea Plantations.\",\n country=country_7)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"Male\",\n description=\"It is among the tooped tourist Attractions of Maldives\"\n \"It has considerably moderate tempaerature through out the year\",\n country=country_8)\nsession.add(places)\nsession.commit()\n\nplaces = TouristPlaces(user_id=1, name=\"Sun Island\",\n description=\"It is adorned with some sparkling beaches\"\n \"beuatigul flowers and lavish greenary that pulls a great number of tourists\",\n country=country_8)\nsession.add(places)\nsession.commit()\n\nprint(\"added countries and Tourist Places added\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='dcnn_visualizer', version='', packages=['dcnn_visualizer',
'dcnn_visualizer.backward_functions'], url='', license='', author=
'Aiga SUZUKI', author_email='[email protected]', description='',
requires=['numpy', 'chainer', 'chainercv'])
<|reserved_special_token_1|>
from distutils.core import setup
setup(name='dcnn_visualizer', version='', packages=['dcnn_visualizer',
'dcnn_visualizer.backward_functions'], url='', license='', author=
'Aiga SUZUKI', author_email='[email protected]', description='',
requires=['numpy', 'chainer', 'chainercv'])
|
flexible
|
{
"blob_id": "b9a75f4e106efade3a1ebdcfe66413107d7eccd0",
"index": 7884,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='dcnn_visualizer', version='', packages=['dcnn_visualizer',\n 'dcnn_visualizer.backward_functions'], url='', license='', author=\n 'Aiga SUZUKI', author_email='[email protected]', description='',\n requires=['numpy', 'chainer', 'chainercv'])\n",
"step-3": "from distutils.core import setup\nsetup(name='dcnn_visualizer', version='', packages=['dcnn_visualizer',\n 'dcnn_visualizer.backward_functions'], url='', license='', author=\n 'Aiga SUZUKI', author_email='[email protected]', description='',\n requires=['numpy', 'chainer', 'chainercv'])\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.db import models
from utils.models import BaseModel
# Create your models here.
class ContentCategory(BaseModel):
    '''Advertisement content category'''
name = models.CharField(verbose_name='名称',max_length=50)
key = models.CharField(verbose_name='类别键名',max_length=50)
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Content(BaseModel):
    '''Advertisement content'''
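    # on_delete=PROTECT keeps a ContentCategory from being deleted while Content rows still reference it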
category = models.ForeignKey(ContentCategory,on_delete=models.PROTECT,verbose_name='类别')
title = models.CharField(verbose_name='标题',max_length=100)
url = models.CharField(verbose_name='内容链接',max_length=300)
image = models.ImageField(verbose_name='图片',null=True,blank=True)
text = models.TextField(verbose_name='内容',null=True,blank=True)
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(verbose_name='是否展示',default=True)
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ':' + self.title
|
normal
|
{
"blob_id": "fd96bf5595ce6ec1f95d0f7a9d1c4ff582826ac0",
"index": 1439,
"step-1": "<mask token>\n\n\nclass ContentCategory(BaseModel):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'tb_content_category'\n verbose_name = '广告内容类别'\n verbose_name_plural = verbose_name\n <mask token>\n\n\nclass Content(BaseModel):\n \"\"\"广告内容\"\"\"\n category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,\n verbose_name='类别')\n title = models.CharField(verbose_name='标题', max_length=100)\n url = models.CharField(verbose_name='内容链接', max_length=300)\n image = models.ImageField(verbose_name='图片', null=True, blank=True)\n text = models.TextField(verbose_name='内容', null=True, blank=True)\n sequence = models.IntegerField(verbose_name='排序')\n status = models.BooleanField(verbose_name='是否展示', default=True)\n\n\n class Meta:\n db_table = 'tb_content'\n verbose_name = '广告内容'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.category.name + ':' + self.title\n",
"step-2": "<mask token>\n\n\nclass ContentCategory(BaseModel):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'tb_content_category'\n verbose_name = '广告内容类别'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n\nclass Content(BaseModel):\n \"\"\"广告内容\"\"\"\n category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,\n verbose_name='类别')\n title = models.CharField(verbose_name='标题', max_length=100)\n url = models.CharField(verbose_name='内容链接', max_length=300)\n image = models.ImageField(verbose_name='图片', null=True, blank=True)\n text = models.TextField(verbose_name='内容', null=True, blank=True)\n sequence = models.IntegerField(verbose_name='排序')\n status = models.BooleanField(verbose_name='是否展示', default=True)\n\n\n class Meta:\n db_table = 'tb_content'\n verbose_name = '广告内容'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.category.name + ':' + self.title\n",
"step-3": "<mask token>\n\n\nclass ContentCategory(BaseModel):\n <mask token>\n name = models.CharField(verbose_name='名称', max_length=50)\n key = models.CharField(verbose_name='类别键名', max_length=50)\n\n\n class Meta:\n db_table = 'tb_content_category'\n verbose_name = '广告内容类别'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n\nclass Content(BaseModel):\n \"\"\"广告内容\"\"\"\n category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,\n verbose_name='类别')\n title = models.CharField(verbose_name='标题', max_length=100)\n url = models.CharField(verbose_name='内容链接', max_length=300)\n image = models.ImageField(verbose_name='图片', null=True, blank=True)\n text = models.TextField(verbose_name='内容', null=True, blank=True)\n sequence = models.IntegerField(verbose_name='排序')\n status = models.BooleanField(verbose_name='是否展示', default=True)\n\n\n class Meta:\n db_table = 'tb_content'\n verbose_name = '广告内容'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.category.name + ':' + self.title\n",
"step-4": "<mask token>\n\n\nclass ContentCategory(BaseModel):\n \"\"\"广告内容类别\"\"\"\n name = models.CharField(verbose_name='名称', max_length=50)\n key = models.CharField(verbose_name='类别键名', max_length=50)\n\n\n class Meta:\n db_table = 'tb_content_category'\n verbose_name = '广告内容类别'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n\nclass Content(BaseModel):\n \"\"\"广告内容\"\"\"\n category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,\n verbose_name='类别')\n title = models.CharField(verbose_name='标题', max_length=100)\n url = models.CharField(verbose_name='内容链接', max_length=300)\n image = models.ImageField(verbose_name='图片', null=True, blank=True)\n text = models.TextField(verbose_name='内容', null=True, blank=True)\n sequence = models.IntegerField(verbose_name='排序')\n status = models.BooleanField(verbose_name='是否展示', default=True)\n\n\n class Meta:\n db_table = 'tb_content'\n verbose_name = '广告内容'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.category.name + ':' + self.title\n",
"step-5": "from django.db import models\nfrom utils.models import BaseModel\n\n# Create your models here.\nclass ContentCategory(BaseModel):\n '''广告内容类别'''\n name = models.CharField(verbose_name='名称',max_length=50)\n key = models.CharField(verbose_name='类别键名',max_length=50)\n\n class Meta:\n db_table = 'tb_content_category'\n verbose_name = '广告内容类别'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\nclass Content(BaseModel):\n '''广告内容'''\n category = models.ForeignKey(ContentCategory,on_delete=models.PROTECT,verbose_name='类别')\n title = models.CharField(verbose_name='标题',max_length=100)\n url = models.CharField(verbose_name='内容链接',max_length=300)\n image = models.ImageField(verbose_name='图片',null=True,blank=True)\n text = models.TextField(verbose_name='内容',null=True,blank=True)\n sequence = models.IntegerField(verbose_name='排序')\n status = models.BooleanField(verbose_name='是否展示',default=True)\n\n class Meta:\n db_table = 'tb_content'\n verbose_name = '广告内容'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.category.name + ':' + self.title",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
import matplotlib.pyplot as pt
import numpy as np
from scipy.optimize import leastsq
####################################
# Setting up test data
def norm(x, media, sd):
norm = []
for i in range(x.size):
norm += [1.0/(sd*np.sqrt(2*np.pi))*np.exp(-(x[i] - media)**2/(2*sd**2))]
return np.array(norm)
media1 = 0
media2 = -2
std1 = 0.5
std2 = 1
x = np.linspace(-20, 20, 500)
y_real = norm(x, media1, std1) + norm(x, media2, std2)
######################################
# Solving
m, dm, sd1, sd2 = [5, 10, 1, 1]
p = [m, dm, sd1, sd2] # Initial guesses for leastsq
y_init = norm(x,m,sd1) + norm(x, m + dm, sd2) # For final comparison plot
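# res() returns the residuals (data minus the two-Gaussian model); leastsq minimizes their sum of squares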
def res(p, y, x):
m, dm, sd1, sd2 = p
m1 = m
    m2 = m1 + dm
y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)
error = y - y_fit
return error
plsq = leastsq(res, p, args = (y_real, x))
y_est = norm(x, plsq[0][0], plsq[0][2]) + norm(x, plsq[0][0] + plsq[0][1], plsq[0][3])
|
normal
|
{
"blob_id": "b3ce17401476afe2edfda3011d5602ba492cd705",
"index": 5817,
"step-1": "<mask token>\n\n\ndef res(p, y, x):\n m, dm, sd1, sd2 = p\n m1 = m\n m2 = m1 + m\n y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)\n error = y - y_fit\n return error\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef norm(x, media, sd):\n norm = []\n for i in range(x.size):\n norm += [1.0 / (sd * np.sqrt(2 * np.pi)) * np.exp(-(x[i] - media) **\n 2 / (2 * sd ** 2))]\n return np.array(norm)\n\n\n<mask token>\n\n\ndef res(p, y, x):\n m, dm, sd1, sd2 = p\n m1 = m\n m2 = m1 + m\n y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)\n error = y - y_fit\n return error\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef norm(x, media, sd):\n norm = []\n for i in range(x.size):\n norm += [1.0 / (sd * np.sqrt(2 * np.pi)) * np.exp(-(x[i] - media) **\n 2 / (2 * sd ** 2))]\n return np.array(norm)\n\n\nmedia1 = 0\nmedia2 = -2\nstd1 = 0.5\nstd2 = 1\nx = np.linspace(-20, 20, 500)\ny_real = norm(x, media1, std1) + norm(x, media2, std2)\nm, dm, sd1, sd2 = [5, 10, 1, 1]\np = [m, dm, sd1, sd2]\ny_init = norm(x, m, sd1) + norm(x, m + dm, sd2)\n\n\ndef res(p, y, x):\n m, dm, sd1, sd2 = p\n m1 = m\n m2 = m1 + m\n y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)\n error = y - y_fit\n return error\n\n\nplsq = leastsq(res, p, args=(y_real, x))\ny_est = norm(x, plsq[0][0], plsq[0][2]) + norm(x, plsq[0][0] + plsq[0][1],\n plsq[0][3])\n",
"step-4": "import matplotlib.pyplot as pt\nimport numpy as np\nfrom scipy.optimize import leastsq\n\n\ndef norm(x, media, sd):\n norm = []\n for i in range(x.size):\n norm += [1.0 / (sd * np.sqrt(2 * np.pi)) * np.exp(-(x[i] - media) **\n 2 / (2 * sd ** 2))]\n return np.array(norm)\n\n\nmedia1 = 0\nmedia2 = -2\nstd1 = 0.5\nstd2 = 1\nx = np.linspace(-20, 20, 500)\ny_real = norm(x, media1, std1) + norm(x, media2, std2)\nm, dm, sd1, sd2 = [5, 10, 1, 1]\np = [m, dm, sd1, sd2]\ny_init = norm(x, m, sd1) + norm(x, m + dm, sd2)\n\n\ndef res(p, y, x):\n m, dm, sd1, sd2 = p\n m1 = m\n m2 = m1 + m\n y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)\n error = y - y_fit\n return error\n\n\nplsq = leastsq(res, p, args=(y_real, x))\ny_est = norm(x, plsq[0][0], plsq[0][2]) + norm(x, plsq[0][0] + plsq[0][1],\n plsq[0][3])\n",
"step-5": "import matplotlib.pyplot as pt\nimport numpy as np\nfrom scipy.optimize import leastsq\n\n####################################\n# Setting up test data\n\ndef norm(x, media, sd):\n norm = []\n\n for i in range(x.size):\n norm += [1.0/(sd*np.sqrt(2*np.pi))*np.exp(-(x[i] - media)**2/(2*sd**2))]\n return np.array(norm)\n\nmedia1 = 0\nmedia2 = -2\nstd1 = 0.5\nstd2 = 1\n\nx = np.linspace(-20, 20, 500)\ny_real = norm(x, media1, std1) + norm(x, media2, std2)\n\n######################################\n# Solving\n\nm, dm, sd1, sd2 = [5, 10, 1, 1]\np = [m, dm, sd1, sd2] # Initial guesses for leastsq\ny_init = norm(x,m,sd1) + norm(x, m + dm, sd2) # For final comparison plot\n\ndef res(p, y, x):\n m, dm, sd1, sd2 = p\n\n m1 = m\n m2 = m1 + m\n y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)\n error = y - y_fit\n\n return error\n\nplsq = leastsq(res, p, args = (y_real, x))\n\ny_est = norm(x, plsq[0][0], plsq[0][2]) + norm(x, plsq[0][0] + plsq[0][1], plsq[0][3])\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import config
import math
import pygame
import utils
class Rocket:
def __init__(self):
self.x = config.initialPosition['x']*config.game['scale'] + config.game['width']/2;
self.y = config.game['height'] - config.game['floorHeight'] - config.initialPosition['y']*config.game['scale'];
self.angle = config.initialPosition['angle'];
self.angle = utils.wrapToPi(self.angle);
self.dh = config.game['scale']*config.rocket['height']/2; #half display height
		self.dw = config.game['scale']*config.rocket['width']/2; # half display width
self.pl = 0 #left motor power
self.pr = 0 #right motor power
def draw(self, display):
pSin = math.sin(self.angle); # precalculated sin
pCos = math.cos(self.angle); # precalculated cos
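		# Each polygon corner below is a half-width/half-height offset rotated by the current angle
		# and translated to (x, y); pygame's y axis points downward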
#main body
pygame.draw.polygon(
display,
config.colors['green'],
[
[
self.x+self.dw*pSin+self.dh*pCos,
self.y+self.dw*pCos-self.dh*pSin,
], [
self.x-self.dw*pSin+self.dh*pCos,
self.y-self.dw*pCos-self.dh*pSin,
], [
self.x-self.dw*pSin-self.dh*pCos,
self.y-self.dw*pCos+self.dh*pSin,
], [
self.x+self.dw*pSin-self.dh*pCos,
self.y+self.dw*pCos+self.dh*pSin,
]
]
);
#left motor
pygame.draw.polygon(
display,
config.colors['red'],
[
[
self.x
+(-self.dh-self.dw*self.pl)*pCos
+(-self.dw/2)*pSin,
self.y
-(-self.dh-self.dw*self.pl)*pSin
+(-self.dw/2)*pCos,
],[
self.x
+(-self.dh)*pCos
+(-self.dw/6)*pSin,
self.y
-(-self.dh)*pSin
+(-self.dw/6)*pCos,
],[
self.x
+(-self.dh)*pCos
+(-5*self.dw/6)*pSin,
self.y
-(-self.dh)*pSin
+(-5*self.dw/6)*pCos,
]
]
)
#right motor
pygame.draw.polygon(
display,
config.colors['red'],
[
[
self.x
+(-self.dh-self.dw*self.pr)*pCos
+(self.dw/2)*pSin,
self.y
-(-self.dh-self.dw*self.pr)*pSin
+(self.dw/2)*pCos,
],[
self.x
+(-self.dh)*pCos
+(self.dw/6)*pSin,
self.y
-(-self.dh)*pSin
+(self.dw/6)*pCos,
],[
self.x
+(-self.dh)*pCos
+(5*self.dw/6)*pSin,
self.y
-(-self.dh)*pSin
+(5*self.dw/6)*pCos,
]
]
)
def update(self, x, y, angle, leftPower, rightPower):
self.x = x*config.game['scale'] + config.game['width']/2;
self.y = config.game['height'] - config.game['floorHeight'] - y*config.game['scale'];
self.angle = angle
self.angle = utils.wrapToPi(self.angle);
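		# Clamp both motor power values to the [0, 1] range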
self.pl = leftPower;
if(self.pl<0):
self.pl = 0
elif self.pl>1:
self.pl = 1
self.pr = rightPower;
if(self.pr<0):
self.pr = 0
elif self.pr>1:
self.pr = 1
|
normal
|
{
"blob_id": "7a1a9d2e773fb783d8522f1ea51e753d5d3782e9",
"index": 7517,
"step-1": "<mask token>\n\n\nclass Rocket:\n <mask token>\n <mask token>\n\n def update(self, x, y, angle, leftPower, rightPower):\n self.x = x * config.game['scale'] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - y * config.game['scale']\n self.angle = angle\n self.angle = utils.wrapToPi(self.angle)\n self.pl = leftPower\n if self.pl < 0:\n self.pl = 0\n elif self.pl > 1:\n self.pl = 1\n self.pr = rightPower\n if self.pr < 0:\n self.pr = 0\n elif self.pr > 1:\n self.pr = 1\n",
"step-2": "<mask token>\n\n\nclass Rocket:\n\n def __init__(self):\n self.x = config.initialPosition['x'] * config.game['scale'\n ] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - config.initialPosition['y'] * config.game['scale']\n self.angle = config.initialPosition['angle']\n self.angle = utils.wrapToPi(self.angle)\n self.dh = config.game['scale'] * config.rocket['height'] / 2\n self.dw = config.game['scale'] * config.rocket['width'] / 2\n self.pl = 0\n self.pr = 0\n <mask token>\n\n def update(self, x, y, angle, leftPower, rightPower):\n self.x = x * config.game['scale'] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - y * config.game['scale']\n self.angle = angle\n self.angle = utils.wrapToPi(self.angle)\n self.pl = leftPower\n if self.pl < 0:\n self.pl = 0\n elif self.pl > 1:\n self.pl = 1\n self.pr = rightPower\n if self.pr < 0:\n self.pr = 0\n elif self.pr > 1:\n self.pr = 1\n",
"step-3": "<mask token>\n\n\nclass Rocket:\n\n def __init__(self):\n self.x = config.initialPosition['x'] * config.game['scale'\n ] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - config.initialPosition['y'] * config.game['scale']\n self.angle = config.initialPosition['angle']\n self.angle = utils.wrapToPi(self.angle)\n self.dh = config.game['scale'] * config.rocket['height'] / 2\n self.dw = config.game['scale'] * config.rocket['width'] / 2\n self.pl = 0\n self.pr = 0\n\n def draw(self, display):\n pSin = math.sin(self.angle)\n pCos = math.cos(self.angle)\n pygame.draw.polygon(display, config.colors['green'], [[self.x + \n self.dw * pSin + self.dh * pCos, self.y + self.dw * pCos - self\n .dh * pSin], [self.x - self.dw * pSin + self.dh * pCos, self.y -\n self.dw * pCos - self.dh * pSin], [self.x - self.dw * pSin - \n self.dh * pCos, self.y - self.dw * pCos + self.dh * pSin], [\n self.x + self.dw * pSin - self.dh * pCos, self.y + self.dw *\n pCos + self.dh * pSin]])\n pygame.draw.polygon(display, config.colors['red'], [[self.x + (-\n self.dh - self.dw * self.pl) * pCos + -self.dw / 2 * pSin, self\n .y - (-self.dh - self.dw * self.pl) * pSin + -self.dw / 2 *\n pCos], [self.x + -self.dh * pCos + -self.dw / 6 * pSin, self.y -\n -self.dh * pSin + -self.dw / 6 * pCos], [self.x + -self.dh *\n pCos + -5 * self.dw / 6 * pSin, self.y - -self.dh * pSin + -5 *\n self.dw / 6 * pCos]])\n pygame.draw.polygon(display, config.colors['red'], [[self.x + (-\n self.dh - self.dw * self.pr) * pCos + self.dw / 2 * pSin, self.\n y - (-self.dh - self.dw * self.pr) * pSin + self.dw / 2 * pCos],\n [self.x + -self.dh * pCos + self.dw / 6 * pSin, self.y - -self.\n dh * pSin + self.dw / 6 * pCos], [self.x + -self.dh * pCos + 5 *\n self.dw / 6 * pSin, self.y - -self.dh * pSin + 5 * self.dw / 6 *\n pCos]])\n\n def update(self, x, y, angle, leftPower, rightPower):\n self.x = x * config.game['scale'] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - y * config.game['scale']\n self.angle = angle\n self.angle = utils.wrapToPi(self.angle)\n self.pl = leftPower\n if self.pl < 0:\n self.pl = 0\n elif self.pl > 1:\n self.pl = 1\n self.pr = rightPower\n if self.pr < 0:\n self.pr = 0\n elif self.pr > 1:\n self.pr = 1\n",
"step-4": "import config\nimport math\nimport pygame\nimport utils\n\n\nclass Rocket:\n\n def __init__(self):\n self.x = config.initialPosition['x'] * config.game['scale'\n ] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - config.initialPosition['y'] * config.game['scale']\n self.angle = config.initialPosition['angle']\n self.angle = utils.wrapToPi(self.angle)\n self.dh = config.game['scale'] * config.rocket['height'] / 2\n self.dw = config.game['scale'] * config.rocket['width'] / 2\n self.pl = 0\n self.pr = 0\n\n def draw(self, display):\n pSin = math.sin(self.angle)\n pCos = math.cos(self.angle)\n pygame.draw.polygon(display, config.colors['green'], [[self.x + \n self.dw * pSin + self.dh * pCos, self.y + self.dw * pCos - self\n .dh * pSin], [self.x - self.dw * pSin + self.dh * pCos, self.y -\n self.dw * pCos - self.dh * pSin], [self.x - self.dw * pSin - \n self.dh * pCos, self.y - self.dw * pCos + self.dh * pSin], [\n self.x + self.dw * pSin - self.dh * pCos, self.y + self.dw *\n pCos + self.dh * pSin]])\n pygame.draw.polygon(display, config.colors['red'], [[self.x + (-\n self.dh - self.dw * self.pl) * pCos + -self.dw / 2 * pSin, self\n .y - (-self.dh - self.dw * self.pl) * pSin + -self.dw / 2 *\n pCos], [self.x + -self.dh * pCos + -self.dw / 6 * pSin, self.y -\n -self.dh * pSin + -self.dw / 6 * pCos], [self.x + -self.dh *\n pCos + -5 * self.dw / 6 * pSin, self.y - -self.dh * pSin + -5 *\n self.dw / 6 * pCos]])\n pygame.draw.polygon(display, config.colors['red'], [[self.x + (-\n self.dh - self.dw * self.pr) * pCos + self.dw / 2 * pSin, self.\n y - (-self.dh - self.dw * self.pr) * pSin + self.dw / 2 * pCos],\n [self.x + -self.dh * pCos + self.dw / 6 * pSin, self.y - -self.\n dh * pSin + self.dw / 6 * pCos], [self.x + -self.dh * pCos + 5 *\n self.dw / 6 * pSin, self.y - -self.dh * pSin + 5 * self.dw / 6 *\n pCos]])\n\n def update(self, x, y, angle, leftPower, rightPower):\n self.x = x * config.game['scale'] + config.game['width'] / 2\n self.y = config.game['height'] - config.game['floorHeight'\n ] - y * config.game['scale']\n self.angle = angle\n self.angle = utils.wrapToPi(self.angle)\n self.pl = leftPower\n if self.pl < 0:\n self.pl = 0\n elif self.pl > 1:\n self.pl = 1\n self.pr = rightPower\n if self.pr < 0:\n self.pr = 0\n elif self.pr > 1:\n self.pr = 1\n",
"step-5": "import config\nimport math\nimport pygame\nimport utils\n\nclass Rocket:\n\tdef __init__(self):\n\t\tself.x = config.initialPosition['x']*config.game['scale'] + config.game['width']/2;\n\t\tself.y = config.game['height'] - config.game['floorHeight'] - config.initialPosition['y']*config.game['scale'];\n\n\t\tself.angle = config.initialPosition['angle'];\n\t\tself.angle = utils.wrapToPi(self.angle);\n\t\tself.dh = config.game['scale']*config.rocket['height']/2; #half display height\n\t\tself.dw = config.game['scale']*config.rocket['width']/2; # half display height\n\t\tself.pl = 0 #left motor power\n\t\tself.pr = 0 #right motor power\n\n\tdef draw(self, display):\n\t\tpSin = math.sin(self.angle); # precalculated sin\n\t\tpCos = math.cos(self.angle); # precalculated cos\n\t\t\n\t\t#main body\n\t\tpygame.draw.polygon(\n\t\t\tdisplay,\n\t\t\tconfig.colors['green'],\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\tself.x+self.dw*pSin+self.dh*pCos,\n\t\t\t\t\tself.y+self.dw*pCos-self.dh*pSin,\n\t\t\t\t], [\n\t\t\t\t\tself.x-self.dw*pSin+self.dh*pCos,\n\t\t\t\t\tself.y-self.dw*pCos-self.dh*pSin,\n\t\t\t\t], [\n\t\t\t\t\tself.x-self.dw*pSin-self.dh*pCos,\n\t\t\t\t\tself.y-self.dw*pCos+self.dh*pSin,\n\t\t\t\t], [\n\t\t\t\t\tself.x+self.dw*pSin-self.dh*pCos,\n\t\t\t\t\tself.y+self.dw*pCos+self.dh*pSin,\n\t\t\t\t]\n\t\t\t]\n\t\t\n\t\t);\n\n\t\t#left motor\n\t\tpygame.draw.polygon(\n\t\t\tdisplay,\n\t\t\tconfig.colors['red'],\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh-self.dw*self.pl)*pCos\n\t\t\t\t\t+(-self.dw/2)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh-self.dw*self.pl)*pSin\n\t\t\t\t\t+(-self.dw/2)*pCos,\n\t\t\t\t],[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh)*pCos\n\t\t\t\t\t+(-self.dw/6)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh)*pSin\n\t\t\t\t\t+(-self.dw/6)*pCos,\n\t\t\t\t],[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh)*pCos\n\t\t\t\t\t+(-5*self.dw/6)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh)*pSin\n\t\t\t\t\t+(-5*self.dw/6)*pCos,\n\t\t\t\t]\n\n\t\t\t]\n\t\t)\n\n\t\t#right motor\n\t\tpygame.draw.polygon(\n\t\t\tdisplay,\n\t\t\tconfig.colors['red'],\n\t\t\t[\n\t\t\t\t[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh-self.dw*self.pr)*pCos\n\t\t\t\t\t+(self.dw/2)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh-self.dw*self.pr)*pSin\n\t\t\t\t\t+(self.dw/2)*pCos,\n\t\t\t\t],[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh)*pCos\n\t\t\t\t\t+(self.dw/6)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh)*pSin\n\t\t\t\t\t+(self.dw/6)*pCos,\n\t\t\t\t],[\n\t\t\t\t\tself.x\n\t\t\t\t\t+(-self.dh)*pCos\n\t\t\t\t\t+(5*self.dw/6)*pSin,\n\t\t\t\t\tself.y\n\t\t\t\t\t-(-self.dh)*pSin\n\t\t\t\t\t+(5*self.dw/6)*pCos,\n\t\t\t\t]\n\n\t\t\t]\n\t\t)\n\n\tdef update(self, x, y, angle, leftPower, rightPower):\n\t\tself.x = x*config.game['scale'] + config.game['width']/2;\n\t\tself.y = config.game['height'] - config.game['floorHeight'] - y*config.game['scale'];\n\n\t\tself.angle = angle\n\t\tself.angle = utils.wrapToPi(self.angle);\n\n\t\tself.pl = leftPower;\n\t\tif(self.pl<0):\n\t\t\tself.pl = 0\n\t\telif self.pl>1:\n\t\t\tself.pl = 1\n\n\t\tself.pr = rightPower;\n\t\tif(self.pr<0):\n\t\t\tself.pr = 0\n\t\telif self.pr>1:\n\t\t\tself.pr = 1\n\n\t\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
print(input()in[str(i**i+i)for i in range(11)])
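# One-liner check: the input is valid exactly when it equals str(x**x + x) for some x in range(11)
# Loop version below: find the smallest x with x**x + x >= num, then compare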
num = int(input())
suma = 0
x = 0
while(suma < num):
x += 1
suma = x**x + x
print(True if suma == num else False)
|
normal
|
{
"blob_id": "20fe9b68e65f6f017897bfa8e99d0c21ba1617fb",
"index": 1522,
"step-1": "print(input()in[str(i**i+i)for i in range(11)])\n\n\n\nnum = int(input())\nsuma = 0\nx = 0\nwhile(suma < num):\n x += 1\n suma = x**x + x\nprint(True if suma == num else False\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def domain_name(url):
while 'https://' in url or 'http://' in url or 'www.' in url:
url = url.replace('https://', ' '
) if 'https://' in url else url.replace('http://', ' '
) if 'http://' in url else url.replace('www.', ' ')
url = list(url)
for i in range(len(url)):
if url[i] == '.':
return ''.join(url[0:i]).strip()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def domain_name(url):
while 'https://' in url or 'http://' in url or 'www.' in url:
url = url.replace('https://', ' '
) if 'https://' in url else url.replace('http://', ' '
) if 'http://' in url else url.replace('www.', ' ')
url = list(url)
for i in range(len(url)):
if url[i] == '.':
return ''.join(url[0:i]).strip()
print(domain_name(
'https://www.codewars.com/kata/514a024011ea4fb54200004b/train/python'))
<|reserved_special_token_1|>
def domain_name(url):
while "https://" in url or "http://" in url or "www." in url:
url = url.replace("https://", ' ') if "https://" in url else url.replace("http://", ' ') if "http://" in url else url.replace("www.", ' ')
url = list(url)
for i in range(len(url)):
if url[i] == ".":
return "".join(url[0:i]).strip()
print(domain_name("https://www.codewars.com/kata/514a024011ea4fb54200004b/train/python"))
|
flexible
|
{
"blob_id": "2b9dfd0cfd62276330f1a4f983f318076f329437",
"index": 5026,
"step-1": "<mask token>\n",
"step-2": "def domain_name(url):\n while 'https://' in url or 'http://' in url or 'www.' in url:\n url = url.replace('https://', ' '\n ) if 'https://' in url else url.replace('http://', ' '\n ) if 'http://' in url else url.replace('www.', ' ')\n url = list(url)\n for i in range(len(url)):\n if url[i] == '.':\n return ''.join(url[0:i]).strip()\n\n\n<mask token>\n",
"step-3": "def domain_name(url):\n while 'https://' in url or 'http://' in url or 'www.' in url:\n url = url.replace('https://', ' '\n ) if 'https://' in url else url.replace('http://', ' '\n ) if 'http://' in url else url.replace('www.', ' ')\n url = list(url)\n for i in range(len(url)):\n if url[i] == '.':\n return ''.join(url[0:i]).strip()\n\n\nprint(domain_name(\n 'https://www.codewars.com/kata/514a024011ea4fb54200004b/train/python'))\n",
"step-4": "def domain_name(url):\n while \"https://\" in url or \"http://\" in url or \"www.\" in url:\n url = url.replace(\"https://\", ' ') if \"https://\" in url else url.replace(\"http://\", ' ') if \"http://\" in url else url.replace(\"www.\", ' ')\n url = list(url)\n for i in range(len(url)):\n if url[i] == \".\":\n return \"\".join(url[0:i]).strip()\nprint(domain_name(\"https://www.codewars.com/kata/514a024011ea4fb54200004b/train/python\"))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .dependencies import have
from .syntax import PythonHighlighter
from .utils import count_locations, image_path, interface_style, natural_sort
<|reserved_special_token_1|>
# © MNELAB developers
#
# License: BSD (3-clause)
from .dependencies import have
from .syntax import PythonHighlighter
from .utils import count_locations, image_path, interface_style, natural_sort
|
flexible
|
{
"blob_id": "837534ebc953dae966154921709398ab2b2e0b33",
"index": 578,
"step-1": "<mask token>\n",
"step-2": "from .dependencies import have\nfrom .syntax import PythonHighlighter\nfrom .utils import count_locations, image_path, interface_style, natural_sort\n",
"step-3": "# © MNELAB developers\n#\n# License: BSD (3-clause)\n\nfrom .dependencies import have\nfrom .syntax import PythonHighlighter\nfrom .utils import count_locations, image_path, interface_style, natural_sort\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# This script copies all files with a certain extension to a new folder without preserving the sub folder structure
# Created by Maurice de Kleijn, Vrije Universiteit Amsterdam Spatial Information Laboratory, for the data management of the archaeological project Barcin Hoyuk
# 22062016 Python 2.7
import shutil
import os
org_GIS = raw_input("provide path to GIS folder in dropbox : eg. C:\Dropbox\Barcin_Hoyuk\AIS_Barcin_Hoyuk\AIS\GIS\\: ")
outputfolder = raw_input("provide path to output folder : eg. C:\Temp\: ")
ext = raw_input("provide extention type to be copied eg .tif or .jpg :")
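# Use the Windows 'dir' command to list every matching file recursively (bare paths) into a temp file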
os.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')
file1 = open(org_GIS + 'tempext.txt', 'r')
lines = file1.readlines()
for line in lines:
ln = line.rstrip('\n')
shutil.copy(ln, outputfolder)
file1.close()
os.system('del ' + org_GIS + 'tempext.txt')
raw_input("done!")
|
normal
|
{
"blob_id": "778cf8064fa45e3e25a66f2165dcf6885c72fb8a",
"index": 634,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')\n<mask token>\nfor line in lines:\n ln = line.rstrip('\\n')\n shutil.copy(ln, outputfolder)\nfile1.close()\nos.system('del ' + org_GIS + 'tempext.txt')\nraw_input('done!')\n",
"step-3": "<mask token>\norg_GIS = raw_input(\n 'provide path to GIS folder in dropbox : eg. C:\\\\Dropbox\\\\Barcin_Hoyuk\\\\AIS_Barcin_Hoyuk\\\\AIS\\\\GIS\\\\: '\n )\noutputfolder = raw_input('provide path to output folder : eg. C:\\\\Temp\\\\: ')\next = raw_input('provide extention type to be copied eg .tif or .jpg :')\nos.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')\nfile1 = open(org_GIS + 'tempext.txt', 'r')\nlines = file1.readlines()\nfor line in lines:\n ln = line.rstrip('\\n')\n shutil.copy(ln, outputfolder)\nfile1.close()\nos.system('del ' + org_GIS + 'tempext.txt')\nraw_input('done!')\n",
"step-4": "import shutil\nimport os\norg_GIS = raw_input(\n 'provide path to GIS folder in dropbox : eg. C:\\\\Dropbox\\\\Barcin_Hoyuk\\\\AIS_Barcin_Hoyuk\\\\AIS\\\\GIS\\\\: '\n )\noutputfolder = raw_input('provide path to output folder : eg. C:\\\\Temp\\\\: ')\next = raw_input('provide extention type to be copied eg .tif or .jpg :')\nos.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')\nfile1 = open(org_GIS + 'tempext.txt', 'r')\nlines = file1.readlines()\nfor line in lines:\n ln = line.rstrip('\\n')\n shutil.copy(ln, outputfolder)\nfile1.close()\nos.system('del ' + org_GIS + 'tempext.txt')\nraw_input('done!')\n",
"step-5": "# This script allows you to copy all files with a certain extention to a new folder without integrating the sub folders\n# Created by Maurice de Kleijn Vrije Universiteit Amsterdam Spatial Information laboratory for the datamanagement of the the archaological project Barin Hoyuk\n# 22062016 Python 2.7\n\nimport shutil\nimport os\n\norg_GIS = raw_input(\"provide path to GIS folder in dropbox : eg. C:\\Dropbox\\Barcin_Hoyuk\\AIS_Barcin_Hoyuk\\AIS\\GIS\\\\: \")\noutputfolder = raw_input(\"provide path to output folder : eg. C:\\Temp\\: \")\next = raw_input(\"provide extention type to be copied eg .tif or .jpg :\")\n\nos.system('dir ' + org_GIS + '*' + ext + ' /s/d/b >' + org_GIS + 'tempext.txt')\n\nfile1 = open(org_GIS + 'tempext.txt', 'r')\nlines = file1.readlines()\n\nfor line in lines:\n ln = line.rstrip('\\n')\n shutil.copy(ln, outputfolder)\nfile1.close()\n\nos.system('del ' + org_GIS + 'tempext.txt')\n\nraw_input(\"done!\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
with open('rosalind_ba3d.txt', 'r') as f:
kmer_length = int(f.readline().strip())
seq = f.readline().strip()
<|reserved_special_token_0|>
for offset in range(len(seq) - kmer_length + 1):
prefix = seq[offset:offset + kmer_length - 1]
suffix = seq[offset + 1:offset + kmer_length]
if prefix in dict:
dict[prefix].append(suffix)
else:
dict[prefix] = [suffix]
for key in sorted(dict):
print(key + ' -> ' + ','.join(sorted(dict[key])))
<|reserved_special_token_1|>
with open('rosalind_ba3d.txt', 'r') as f:
kmer_length = int(f.readline().strip())
seq = f.readline().strip()
dict = {}
for offset in range(len(seq) - kmer_length + 1):
prefix = seq[offset:offset + kmer_length - 1]
suffix = seq[offset + 1:offset + kmer_length]
if prefix in dict:
dict[prefix].append(suffix)
else:
dict[prefix] = [suffix]
for key in sorted(dict):
print(key + ' -> ' + ','.join(sorted(dict[key])))
<|reserved_special_token_1|>
with open('rosalind_ba3d.txt','r') as f:
kmer_length = int(f.readline().strip())
seq = f.readline().strip()
dict = {}
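# Adjacency list of the De Bruijn graph: each (k-1)-mer prefix maps to the (k-1)-mer suffixes that follow it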
for offset in range(len(seq)-kmer_length+1):
prefix = seq[offset:offset+kmer_length-1]
suffix = seq[offset+1:offset+kmer_length]
if prefix in dict:
dict[prefix].append(suffix)
else:
dict[prefix] = [suffix]
for key in sorted(dict):
print(key + " -> " + ','.join(sorted(dict[key])))
|
flexible
|
{
"blob_id": "050f060bb9d3d46f8b87c9802356bd0da8f926f8",
"index": 6244,
"step-1": "<mask token>\n",
"step-2": "with open('rosalind_ba3d.txt', 'r') as f:\n kmer_length = int(f.readline().strip())\n seq = f.readline().strip()\n<mask token>\nfor offset in range(len(seq) - kmer_length + 1):\n prefix = seq[offset:offset + kmer_length - 1]\n suffix = seq[offset + 1:offset + kmer_length]\n if prefix in dict:\n dict[prefix].append(suffix)\n else:\n dict[prefix] = [suffix]\nfor key in sorted(dict):\n print(key + ' -> ' + ','.join(sorted(dict[key])))\n",
"step-3": "with open('rosalind_ba3d.txt', 'r') as f:\n kmer_length = int(f.readline().strip())\n seq = f.readline().strip()\ndict = {}\nfor offset in range(len(seq) - kmer_length + 1):\n prefix = seq[offset:offset + kmer_length - 1]\n suffix = seq[offset + 1:offset + kmer_length]\n if prefix in dict:\n dict[prefix].append(suffix)\n else:\n dict[prefix] = [suffix]\nfor key in sorted(dict):\n print(key + ' -> ' + ','.join(sorted(dict[key])))\n",
"step-4": "with open('rosalind_ba3d.txt','r') as f:\r\n\tkmer_length = int(f.readline().strip())\r\n\tseq = f.readline().strip()\r\n\r\ndict = {}\r\nfor offset in range(len(seq)-kmer_length+1):\r\n\tprefix = seq[offset:offset+kmer_length-1]\r\n\tsuffix = seq[offset+1:offset+kmer_length]\r\n\tif prefix in dict:\r\n\t\tdict[prefix].append(suffix)\r\n\telse:\r\n\t\tdict[prefix] = [suffix]\r\n\r\nfor key in sorted(dict):\r\n\tprint(key + \" -> \" + ','.join(sorted(dict[key])))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class ConsoleLogger:
<|reserved_special_token_0|>
def set_level(self, level):
self.logger.setLevel(level)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, name=__name__, default_level=logging.DEBUG):
self.logger = logging.Logger(name)
if not self.logger.handlers or len(self.logger.handlers) < 1:
for handler_class, params, formatted, level in self.handlers:
handler = handler_class(**params)
handler.setFormatter(logging.Formatter(formatted))
handler.setLevel(level if not default_level else default_level)
self.logger.addHandler(handler)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ConsoleLogger:
<|reserved_special_token_0|>
def set_level(self, level):
self.logger.setLevel(level)
def debug(self, message):
self.logger.debug(message)
def info(self, message):
self.logger.info(message)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def exception(self, message):
self.logger.exception(message)
def __init__(self, name=__name__, default_level=logging.DEBUG):
self.logger = logging.Logger(name)
if not self.logger.handlers or len(self.logger.handlers) < 1:
for handler_class, params, formatted, level in self.handlers:
handler = handler_class(**params)
handler.setFormatter(logging.Formatter(formatted))
handler.setLevel(level if not default_level else default_level)
self.logger.addHandler(handler)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ConsoleLogger:
<|reserved_special_token_0|>
def set_level(self, level):
self.logger.setLevel(level)
def debug(self, message):
self.logger.debug(message)
def info(self, message):
self.logger.info(message)
def warning(self, message):
self.logger.warning(message)
<|reserved_special_token_0|>
def exception(self, message):
self.logger.exception(message)
def __init__(self, name=__name__, default_level=logging.DEBUG):
self.logger = logging.Logger(name)
if not self.logger.handlers or len(self.logger.handlers) < 1:
for handler_class, params, formatted, level in self.handlers:
handler = handler_class(**params)
handler.setFormatter(logging.Formatter(formatted))
handler.setLevel(level if not default_level else default_level)
self.logger.addHandler(handler)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ConsoleLogger:
<|reserved_special_token_0|>
def set_level(self, level):
self.logger.setLevel(level)
def debug(self, message):
self.logger.debug(message)
def info(self, message):
self.logger.info(message)
def warning(self, message):
self.logger.warning(message)
def error(self, message):
self.logger.error(message)
def exception(self, message):
self.logger.exception(message)
def __init__(self, name=__name__, default_level=logging.DEBUG):
self.logger = logging.Logger(name)
if not self.logger.handlers or len(self.logger.handlers) < 1:
for handler_class, params, formatted, level in self.handlers:
handler = handler_class(**params)
handler.setFormatter(logging.Formatter(formatted))
handler.setLevel(level if not default_level else default_level)
self.logger.addHandler(handler)
<|reserved_special_token_1|>
import logging
class ConsoleLogger:
handlers = [
(logging.StreamHandler,
dict(),
"[%(name)s]\t %(asctime)s [%(levelname)s] %(message)s ",
logging.DEBUG)
]
def set_level(self, level):
self.logger.setLevel(level)
def debug(self, message):
self.logger.debug(message)
def info(self, message):
self.logger.info(message)
def warning(self, message):
self.logger.warning(message)
def error(self, message):
self.logger.error(message)
def exception(self, message):
self.logger.exception(message)
def __init__(self, name=__name__, default_level=logging.DEBUG):
self.logger = logging.Logger(name)
if not self.logger.handlers or len(self.logger.handlers) < 1:
for handler_class, params, formatted, level in self.handlers:
handler = handler_class(**params)
handler.setFormatter(logging.Formatter(formatted))
handler.setLevel(level if not default_level else default_level)
self.logger.addHandler(handler)
|
flexible
|
{
"blob_id": "5299f2c66fd287be667ecbe11b8470263eafab5c",
"index": 702,
"step-1": "<mask token>\n\n\nclass ConsoleLogger:\n <mask token>\n\n def set_level(self, level):\n self.logger.setLevel(level)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, name=__name__, default_level=logging.DEBUG):\n self.logger = logging.Logger(name)\n if not self.logger.handlers or len(self.logger.handlers) < 1:\n for handler_class, params, formatted, level in self.handlers:\n handler = handler_class(**params)\n handler.setFormatter(logging.Formatter(formatted))\n handler.setLevel(level if not default_level else default_level)\n self.logger.addHandler(handler)\n",
"step-2": "<mask token>\n\n\nclass ConsoleLogger:\n <mask token>\n\n def set_level(self, level):\n self.logger.setLevel(level)\n\n def debug(self, message):\n self.logger.debug(message)\n\n def info(self, message):\n self.logger.info(message)\n <mask token>\n <mask token>\n\n def exception(self, message):\n self.logger.exception(message)\n\n def __init__(self, name=__name__, default_level=logging.DEBUG):\n self.logger = logging.Logger(name)\n if not self.logger.handlers or len(self.logger.handlers) < 1:\n for handler_class, params, formatted, level in self.handlers:\n handler = handler_class(**params)\n handler.setFormatter(logging.Formatter(formatted))\n handler.setLevel(level if not default_level else default_level)\n self.logger.addHandler(handler)\n",
"step-3": "<mask token>\n\n\nclass ConsoleLogger:\n <mask token>\n\n def set_level(self, level):\n self.logger.setLevel(level)\n\n def debug(self, message):\n self.logger.debug(message)\n\n def info(self, message):\n self.logger.info(message)\n\n def warning(self, message):\n self.logger.warning(message)\n <mask token>\n\n def exception(self, message):\n self.logger.exception(message)\n\n def __init__(self, name=__name__, default_level=logging.DEBUG):\n self.logger = logging.Logger(name)\n if not self.logger.handlers or len(self.logger.handlers) < 1:\n for handler_class, params, formatted, level in self.handlers:\n handler = handler_class(**params)\n handler.setFormatter(logging.Formatter(formatted))\n handler.setLevel(level if not default_level else default_level)\n self.logger.addHandler(handler)\n",
"step-4": "<mask token>\n\n\nclass ConsoleLogger:\n <mask token>\n\n def set_level(self, level):\n self.logger.setLevel(level)\n\n def debug(self, message):\n self.logger.debug(message)\n\n def info(self, message):\n self.logger.info(message)\n\n def warning(self, message):\n self.logger.warning(message)\n\n def error(self, message):\n self.logger.error(message)\n\n def exception(self, message):\n self.logger.exception(message)\n\n def __init__(self, name=__name__, default_level=logging.DEBUG):\n self.logger = logging.Logger(name)\n if not self.logger.handlers or len(self.logger.handlers) < 1:\n for handler_class, params, formatted, level in self.handlers:\n handler = handler_class(**params)\n handler.setFormatter(logging.Formatter(formatted))\n handler.setLevel(level if not default_level else default_level)\n self.logger.addHandler(handler)\n",
"step-5": "import logging\n\n\nclass ConsoleLogger:\n\n handlers = [\n (logging.StreamHandler,\n dict(),\n \"[%(name)s]\\t %(asctime)s [%(levelname)s] %(message)s \",\n logging.DEBUG)\n ]\n\n def set_level(self, level):\n self.logger.setLevel(level)\n\n def debug(self, message):\n self.logger.debug(message)\n\n def info(self, message):\n self.logger.info(message)\n\n def warning(self, message):\n self.logger.warning(message)\n\n def error(self, message):\n self.logger.error(message)\n\n def exception(self, message):\n self.logger.exception(message)\n\n def __init__(self, name=__name__, default_level=logging.DEBUG):\n self.logger = logging.Logger(name)\n if not self.logger.handlers or len(self.logger.handlers) < 1:\n for handler_class, params, formatted, level in self.handlers:\n handler = handler_class(**params)\n handler.setFormatter(logging.Formatter(formatted))\n handler.setLevel(level if not default_level else default_level)\n\n self.logger.addHandler(handler)\n",
"step-ids": [
3,
6,
7,
8,
11
]
}
|
[
3,
6,
7,
8,
11
] |
#Task 4 - write code that prints all the commit messages from a repository
import requests
r = requests.get('https://api.github.com/repos/smeiklej/secu2002_2017/commits')
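# The commits endpoint returns a JSON array of commit objects for the repository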
text = r.json()
#print the commit message for each commit returned by the API
for row in text:
print row['commit']['message']
|
normal
|
{
"blob_id": "d07046e33bbfa404c354fef3e8990a3fa0203060",
"index": 1843,
"step-1": "#Task 4 - writing a code that prints all the commit message from repository\nimport requests\nr = requests.get('https://api.github.com/repos/smeiklej/secu2002_2017/commits')\ntext = r.json()\n\n#asking the code to print out the commit message for all rows in the text\nfor row in text:\n print row['commit']['message']\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
'''
Created on May 18, 2010
@author: Abi.Mohammadi & Majid.Vesal
'''
from threading import current_thread
import copy
import time
from deltapy.core import DeltaException, Context
import deltapy.security.services as security_services
import deltapy.security.session.services as session_services
import deltapy.unique_id.services as unique_id_services
class SessionException(DeltaException):
'''
A class for handling session exceptions.
'''
pass
#class SessionContext(Context):
# '''
# A class for saving some data in session domain.
# '''
#
# def __init__(self, session):
# '''
# @param session:
# '''
#
# Context.__init__(self)
# self['__session__'] = session
#
# def __setitem__(self, key, value):
# '''
# Sets new item or updates existing item in context
#
# @param key:
# @param value:
# '''
#
# result = Context.__setitem__(self, key, value)
# self['__session__'].update()
# return result
class SessionContext(dict):
'''
A class for saving some data in session domain.
'''
def __init__(self, session):
'''
@param session:
'''
super(SessionContext, self).__init__()
self._ticket = session.get_ticket()
def __setitem__(self, key, value):
'''
Sets new item or updates existing item in context
@param key:
@param value:
'''
result = super(SessionContext, self).__setitem__(key, value)
# Updating session because of this change in session context
session_services.get_session(self._ticket, False).update()
return result
class Session:
"""
A class for storing session information.
"""
class StateEnum:
'''
A class for defining session state.
'''
ACTIVE = "Active"
INACTIVE = "Inactive"
CLOSED = "Closed"
KILLED = "Killed"
EXPIRED = "Expired"
DISABLED = "Disabled"
def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):
self._ticket = ticket
self._state = Session.StateEnum.INACTIVE
self._create_date = time.time()
self._id = unique_id_services.get_id('session_id')
self._context = SessionContext(self)
self._user_id = user.id
self._client_ip = client_ip
self._client_request = None
self._lifetime = lifetime # millisecond
def get_client_ip(self):
'''
Returns the user IP address.
'''
return self._client_ip
def close(self):
'''
Closes the session.
'''
session_services.close_session(self)
def active(self, client_request):
'''
Activates the session. Sets this session to current thread.
'''
self._set_client_request(client_request)
thread = current_thread()
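        # Expose the client's locale on the handling thread so downstream code can read it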
thread.__LOCALE__ = client_request.context.get('__LOCALE__')
session_services.active_session(self)
def _set_client_request(self, client_request):
'''
Sets call context to session.
'''
if client_request.context is None:
client_request.context = {}
self._client_request = copy.deepcopy(client_request)
def get_call_context(self):
'''
Returns call context.
@return {}
'''
return self._client_request.context
def get_internal_context(self):
'''
        Returns internal system context for the current call
@rtype: dict
@return: internal context dictionary
'''
if not hasattr(self._client_request, 'internal_context') or \
self._client_request.internal_context is None:
self._client_request.internal_context = {}
return self._client_request.internal_context
def get_client_request(self):
'''
Returns current client request.
@rtype: ClientRequest
@return: client request
'''
return self._client_request
def get_ticket(self):
'''
Returns session ID.
@return: str
'''
return self._ticket
def get_id(self):
'''
Returns session ID.
@return: int
'''
return self._id
def get_user(self):
'''
Returns the user which creates this session.
@return: user
'''
return security_services.get_user(self._user_id)
def get_user_id(self):
'''
Returns the user which creates this session.
@return: user
'''
return self._user_id
def update(self):
'''
Updates session.
'''
session_services.update_session(self)
def cleanup(self):
'''
Cleanups the session.
'''
session_services.cleanup_session(self)
def get_state(self):
'''
Returns the session state.
@return: str
'''
return self._state
def set_state(self, state):
'''
Returns the session state.
@return: str
'''
self._state = state
self.update()
def get_creation_date(self):
'''
Returns the session creation date.
@return:
'''
return time.ctime(self._create_date)
def get_context(self):
'''
Returns session context.
@return: SessionContext
'''
return self._context
def __str__(self):
return "%s[%s]" % (self.__class__.__name__, self.get_ticket())
def __repr__(self):
return "%s[%s]" % (self.__class__.__name__, self.get_ticket())
def is_expired(self):
"""
If session is expired, returns True.
@return: Is expired
@rtype: bool
"""
if self._lifetime is not None and self._lifetime > 0:
            # 300 seconds of wait is the tolerance!
# The unit of lifetime is millisecond
if (time.time() - self._create_date) * 1000 > self._lifetime + 300000:
return True
return False
|
normal
|
{
"blob_id": "80469fd945a21c1bd2b5590047016a4b60880c88",
"index": 7006,
"step-1": "<mask token>\n\n\nclass Session:\n <mask token>\n\n\n class StateEnum:\n \"\"\"\n A class for defining session state.\n \"\"\"\n ACTIVE = 'Active'\n INACTIVE = 'Inactive'\n CLOSED = 'Closed'\n KILLED = 'Killed'\n EXPIRED = 'Expired'\n DISABLED = 'Disabled'\n\n def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):\n self._ticket = ticket\n self._state = Session.StateEnum.INACTIVE\n self._create_date = time.time()\n self._id = unique_id_services.get_id('session_id')\n self._context = SessionContext(self)\n self._user_id = user.id\n self._client_ip = client_ip\n self._client_request = None\n self._lifetime = lifetime\n\n def get_client_ip(self):\n \"\"\"\n Returns the user IP address.\n \"\"\"\n return self._client_ip\n\n def close(self):\n \"\"\"\n Closes the session.\n \"\"\"\n session_services.close_session(self)\n\n def active(self, client_request):\n \"\"\"\n Activates the session. Sets this session to current thread.\n \"\"\"\n self._set_client_request(client_request)\n thread = current_thread()\n thread.__LOCALE__ = client_request.context.get('__LOCALE__')\n session_services.active_session(self)\n\n def _set_client_request(self, client_request):\n \"\"\"\n Sets call context to session.\n \"\"\"\n if client_request.context is None:\n client_request.context = {}\n self._client_request = copy.deepcopy(client_request)\n\n def get_call_context(self):\n \"\"\"\n Returns call context.\n \n @return {}\n \"\"\"\n return self._client_request.context\n\n def get_internal_context(self):\n \"\"\"\n Retunrs internal system context for the current call\n\n @rtype: dict\n @return: internal context dictionary\n \"\"\"\n if not hasattr(self._client_request, 'internal_context'\n ) or self._client_request.internal_context is None:\n self._client_request.internal_context = {}\n return self._client_request.internal_context\n\n def get_client_request(self):\n \"\"\"\n Returns current client request.\n \n @rtype: ClientRequest\n @return: client request\n \"\"\"\n return self._client_request\n <mask token>\n\n def get_id(self):\n \"\"\"\n Returns session ID.\n \n @return: int\n \"\"\"\n return self._id\n\n def get_user(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return security_services.get_user(self._user_id)\n\n def get_user_id(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return self._user_id\n\n def update(self):\n \"\"\"\n Updates session.\n \"\"\"\n session_services.update_session(self)\n\n def cleanup(self):\n \"\"\"\n Cleanups the session.\n \"\"\"\n session_services.cleanup_session(self)\n\n def get_state(self):\n \"\"\"\n Returns the session state.\n \n @return: str\n \"\"\"\n return self._state\n <mask token>\n\n def get_creation_date(self):\n \"\"\"\n Returns the session creation date.\n \n @return: \n \"\"\"\n return time.ctime(self._create_date)\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '%s[%s]' % (self.__class__.__name__, self.get_ticket())\n\n def is_expired(self):\n \"\"\"\n If session is expired, returns True.\n\n @return: Is expired\n @rtype: bool\n \"\"\"\n if self._lifetime is not None and self._lifetime > 0:\n if (time.time() - self._create_date\n ) * 1000 > self._lifetime + 300000:\n return True\n return False\n",
"step-2": "<mask token>\n\n\nclass Session:\n <mask token>\n\n\n class StateEnum:\n \"\"\"\n A class for defining session state.\n \"\"\"\n ACTIVE = 'Active'\n INACTIVE = 'Inactive'\n CLOSED = 'Closed'\n KILLED = 'Killed'\n EXPIRED = 'Expired'\n DISABLED = 'Disabled'\n\n def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):\n self._ticket = ticket\n self._state = Session.StateEnum.INACTIVE\n self._create_date = time.time()\n self._id = unique_id_services.get_id('session_id')\n self._context = SessionContext(self)\n self._user_id = user.id\n self._client_ip = client_ip\n self._client_request = None\n self._lifetime = lifetime\n\n def get_client_ip(self):\n \"\"\"\n Returns the user IP address.\n \"\"\"\n return self._client_ip\n\n def close(self):\n \"\"\"\n Closes the session.\n \"\"\"\n session_services.close_session(self)\n\n def active(self, client_request):\n \"\"\"\n Activates the session. Sets this session to current thread.\n \"\"\"\n self._set_client_request(client_request)\n thread = current_thread()\n thread.__LOCALE__ = client_request.context.get('__LOCALE__')\n session_services.active_session(self)\n\n def _set_client_request(self, client_request):\n \"\"\"\n Sets call context to session.\n \"\"\"\n if client_request.context is None:\n client_request.context = {}\n self._client_request = copy.deepcopy(client_request)\n\n def get_call_context(self):\n \"\"\"\n Returns call context.\n \n @return {}\n \"\"\"\n return self._client_request.context\n\n def get_internal_context(self):\n \"\"\"\n Retunrs internal system context for the current call\n\n @rtype: dict\n @return: internal context dictionary\n \"\"\"\n if not hasattr(self._client_request, 'internal_context'\n ) or self._client_request.internal_context is None:\n self._client_request.internal_context = {}\n return self._client_request.internal_context\n\n def get_client_request(self):\n \"\"\"\n Returns current client request.\n \n @rtype: ClientRequest\n @return: client request\n \"\"\"\n return self._client_request\n\n def get_ticket(self):\n \"\"\"\n Returns session ID.\n \n @return: str\n \"\"\"\n return self._ticket\n\n def get_id(self):\n \"\"\"\n Returns session ID.\n \n @return: int\n \"\"\"\n return self._id\n\n def get_user(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return security_services.get_user(self._user_id)\n\n def get_user_id(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return self._user_id\n\n def update(self):\n \"\"\"\n Updates session.\n \"\"\"\n session_services.update_session(self)\n\n def cleanup(self):\n \"\"\"\n Cleanups the session.\n \"\"\"\n session_services.cleanup_session(self)\n\n def get_state(self):\n \"\"\"\n Returns the session state.\n \n @return: str\n \"\"\"\n return self._state\n\n def set_state(self, state):\n \"\"\"\n Returns the session state.\n \n @return: str\n \"\"\"\n self._state = state\n self.update()\n\n def get_creation_date(self):\n \"\"\"\n Returns the session creation date.\n \n @return: \n \"\"\"\n return time.ctime(self._create_date)\n\n def get_context(self):\n \"\"\"\n Returns session context.\n \n @return: SessionContext\n \"\"\"\n return self._context\n <mask token>\n\n def __repr__(self):\n return '%s[%s]' % (self.__class__.__name__, self.get_ticket())\n\n def is_expired(self):\n \"\"\"\n If session is expired, returns True.\n\n @return: Is expired\n @rtype: bool\n \"\"\"\n if self._lifetime is not None and self._lifetime > 0:\n if (time.time() - 
self._create_date\n ) * 1000 > self._lifetime + 300000:\n return True\n return False\n",
"step-3": "<mask token>\n\n\nclass SessionContext(dict):\n <mask token>\n <mask token>\n\n def __setitem__(self, key, value):\n \"\"\"\n Sets new item or updates existing item in context\n \n @param key:\n @param value:\n \"\"\"\n result = super(SessionContext, self).__setitem__(key, value)\n session_services.get_session(self._ticket, False).update()\n return result\n\n\nclass Session:\n \"\"\"\n A class for storing session information.\n \"\"\"\n\n\n class StateEnum:\n \"\"\"\n A class for defining session state.\n \"\"\"\n ACTIVE = 'Active'\n INACTIVE = 'Inactive'\n CLOSED = 'Closed'\n KILLED = 'Killed'\n EXPIRED = 'Expired'\n DISABLED = 'Disabled'\n\n def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):\n self._ticket = ticket\n self._state = Session.StateEnum.INACTIVE\n self._create_date = time.time()\n self._id = unique_id_services.get_id('session_id')\n self._context = SessionContext(self)\n self._user_id = user.id\n self._client_ip = client_ip\n self._client_request = None\n self._lifetime = lifetime\n\n def get_client_ip(self):\n \"\"\"\n Returns the user IP address.\n \"\"\"\n return self._client_ip\n\n def close(self):\n \"\"\"\n Closes the session.\n \"\"\"\n session_services.close_session(self)\n\n def active(self, client_request):\n \"\"\"\n Activates the session. Sets this session to current thread.\n \"\"\"\n self._set_client_request(client_request)\n thread = current_thread()\n thread.__LOCALE__ = client_request.context.get('__LOCALE__')\n session_services.active_session(self)\n\n def _set_client_request(self, client_request):\n \"\"\"\n Sets call context to session.\n \"\"\"\n if client_request.context is None:\n client_request.context = {}\n self._client_request = copy.deepcopy(client_request)\n\n def get_call_context(self):\n \"\"\"\n Returns call context.\n \n @return {}\n \"\"\"\n return self._client_request.context\n\n def get_internal_context(self):\n \"\"\"\n Retunrs internal system context for the current call\n\n @rtype: dict\n @return: internal context dictionary\n \"\"\"\n if not hasattr(self._client_request, 'internal_context'\n ) or self._client_request.internal_context is None:\n self._client_request.internal_context = {}\n return self._client_request.internal_context\n\n def get_client_request(self):\n \"\"\"\n Returns current client request.\n \n @rtype: ClientRequest\n @return: client request\n \"\"\"\n return self._client_request\n\n def get_ticket(self):\n \"\"\"\n Returns session ID.\n \n @return: str\n \"\"\"\n return self._ticket\n\n def get_id(self):\n \"\"\"\n Returns session ID.\n \n @return: int\n \"\"\"\n return self._id\n\n def get_user(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return security_services.get_user(self._user_id)\n\n def get_user_id(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return self._user_id\n\n def update(self):\n \"\"\"\n Updates session.\n \"\"\"\n session_services.update_session(self)\n\n def cleanup(self):\n \"\"\"\n Cleanups the session.\n \"\"\"\n session_services.cleanup_session(self)\n\n def get_state(self):\n \"\"\"\n Returns the session state.\n \n @return: str\n \"\"\"\n return self._state\n\n def set_state(self, state):\n \"\"\"\n Returns the session state.\n \n @return: str\n \"\"\"\n self._state = state\n self.update()\n\n def get_creation_date(self):\n \"\"\"\n Returns the session creation date.\n \n @return: \n \"\"\"\n return time.ctime(self._create_date)\n\n def get_context(self):\n 
\"\"\"\n Returns session context.\n \n @return: SessionContext\n \"\"\"\n return self._context\n\n def __str__(self):\n return '%s[%s]' % (self.__class__.__name__, self.get_ticket())\n\n def __repr__(self):\n return '%s[%s]' % (self.__class__.__name__, self.get_ticket())\n\n def is_expired(self):\n \"\"\"\n If session is expired, returns True.\n\n @return: Is expired\n @rtype: bool\n \"\"\"\n if self._lifetime is not None and self._lifetime > 0:\n if (time.time() - self._create_date\n ) * 1000 > self._lifetime + 300000:\n return True\n return False\n",
"step-4": "<mask token>\n\n\nclass SessionException(DeltaException):\n <mask token>\n pass\n\n\nclass SessionContext(dict):\n \"\"\"\n A class for saving some data in session domain.\n \"\"\"\n\n def __init__(self, session):\n \"\"\"\n @param session:\n \"\"\"\n super(SessionContext, self).__init__()\n self._ticket = session.get_ticket()\n\n def __setitem__(self, key, value):\n \"\"\"\n Sets new item or updates existing item in context\n \n @param key:\n @param value:\n \"\"\"\n result = super(SessionContext, self).__setitem__(key, value)\n session_services.get_session(self._ticket, False).update()\n return result\n\n\nclass Session:\n \"\"\"\n A class for storing session information.\n \"\"\"\n\n\n class StateEnum:\n \"\"\"\n A class for defining session state.\n \"\"\"\n ACTIVE = 'Active'\n INACTIVE = 'Inactive'\n CLOSED = 'Closed'\n KILLED = 'Killed'\n EXPIRED = 'Expired'\n DISABLED = 'Disabled'\n\n def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):\n self._ticket = ticket\n self._state = Session.StateEnum.INACTIVE\n self._create_date = time.time()\n self._id = unique_id_services.get_id('session_id')\n self._context = SessionContext(self)\n self._user_id = user.id\n self._client_ip = client_ip\n self._client_request = None\n self._lifetime = lifetime\n\n def get_client_ip(self):\n \"\"\"\n Returns the user IP address.\n \"\"\"\n return self._client_ip\n\n def close(self):\n \"\"\"\n Closes the session.\n \"\"\"\n session_services.close_session(self)\n\n def active(self, client_request):\n \"\"\"\n Activates the session. Sets this session to current thread.\n \"\"\"\n self._set_client_request(client_request)\n thread = current_thread()\n thread.__LOCALE__ = client_request.context.get('__LOCALE__')\n session_services.active_session(self)\n\n def _set_client_request(self, client_request):\n \"\"\"\n Sets call context to session.\n \"\"\"\n if client_request.context is None:\n client_request.context = {}\n self._client_request = copy.deepcopy(client_request)\n\n def get_call_context(self):\n \"\"\"\n Returns call context.\n \n @return {}\n \"\"\"\n return self._client_request.context\n\n def get_internal_context(self):\n \"\"\"\n Retunrs internal system context for the current call\n\n @rtype: dict\n @return: internal context dictionary\n \"\"\"\n if not hasattr(self._client_request, 'internal_context'\n ) or self._client_request.internal_context is None:\n self._client_request.internal_context = {}\n return self._client_request.internal_context\n\n def get_client_request(self):\n \"\"\"\n Returns current client request.\n \n @rtype: ClientRequest\n @return: client request\n \"\"\"\n return self._client_request\n\n def get_ticket(self):\n \"\"\"\n Returns session ID.\n \n @return: str\n \"\"\"\n return self._ticket\n\n def get_id(self):\n \"\"\"\n Returns session ID.\n \n @return: int\n \"\"\"\n return self._id\n\n def get_user(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return security_services.get_user(self._user_id)\n\n def get_user_id(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return self._user_id\n\n def update(self):\n \"\"\"\n Updates session.\n \"\"\"\n session_services.update_session(self)\n\n def cleanup(self):\n \"\"\"\n Cleanups the session.\n \"\"\"\n session_services.cleanup_session(self)\n\n def get_state(self):\n \"\"\"\n Returns the session state.\n \n @return: str\n \"\"\"\n return self._state\n\n def set_state(self, state):\n \"\"\"\n Returns the 
session state.\n \n @return: str\n \"\"\"\n self._state = state\n self.update()\n\n def get_creation_date(self):\n \"\"\"\n Returns the session creation date.\n \n @return: \n \"\"\"\n return time.ctime(self._create_date)\n\n def get_context(self):\n \"\"\"\n Returns session context.\n \n @return: SessionContext\n \"\"\"\n return self._context\n\n def __str__(self):\n return '%s[%s]' % (self.__class__.__name__, self.get_ticket())\n\n def __repr__(self):\n return '%s[%s]' % (self.__class__.__name__, self.get_ticket())\n\n def is_expired(self):\n \"\"\"\n If session is expired, returns True.\n\n @return: Is expired\n @rtype: bool\n \"\"\"\n if self._lifetime is not None and self._lifetime > 0:\n if (time.time() - self._create_date\n ) * 1000 > self._lifetime + 300000:\n return True\n return False\n",
"step-5": "'''\nCreated on May 18, 2010\n\n@author: Abi.Mohammadi & Majid.Vesal\n'''\n\nfrom threading import current_thread\n\nimport copy\nimport time\n\nfrom deltapy.core import DeltaException, Context\n\nimport deltapy.security.services as security_services\nimport deltapy.security.session.services as session_services\nimport deltapy.unique_id.services as unique_id_services\n\nclass SessionException(DeltaException):\n '''\n A class for handling session exceptions.\n '''\n pass\n\n#class SessionContext(Context):\n# '''\n# A class for saving some data in session domain.\n# '''\n# \n# def __init__(self, session):\n# '''\n# @param session:\n# '''\n# \n# Context.__init__(self)\n# self['__session__'] = session\n# \n# def __setitem__(self, key, value):\n# '''\n# Sets new item or updates existing item in context\n# \n# @param key:\n# @param value:\n# '''\n# \n# result = Context.__setitem__(self, key, value)\n# self['__session__'].update()\n# return result\n\nclass SessionContext(dict):\n '''\n A class for saving some data in session domain.\n '''\n \n def __init__(self, session):\n '''\n @param session:\n '''\n \n super(SessionContext, self).__init__()\n self._ticket = session.get_ticket()\n \n def __setitem__(self, key, value):\n '''\n Sets new item or updates existing item in context\n \n @param key:\n @param value:\n '''\n result = super(SessionContext, self).__setitem__(key, value)\n \n # Updating session because of this change in session context\n session_services.get_session(self._ticket, False).update()\n \n return result\n\nclass Session:\n \"\"\"\n A class for storing session information.\n \"\"\"\n\n class StateEnum:\n '''\n A class for defining session state.\n '''\n ACTIVE = \"Active\"\n INACTIVE = \"Inactive\"\n CLOSED = \"Closed\"\n KILLED = \"Killed\"\n EXPIRED = \"Expired\"\n DISABLED = \"Disabled\"\n\n def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):\n self._ticket = ticket\n self._state = Session.StateEnum.INACTIVE\n self._create_date = time.time()\n self._id = unique_id_services.get_id('session_id')\n self._context = SessionContext(self)\n self._user_id = user.id\n self._client_ip = client_ip\n self._client_request = None\n self._lifetime = lifetime # millisecond\n \n def get_client_ip(self):\n '''\n Returns the user IP address.\n '''\n \n return self._client_ip\n \n def close(self):\n '''\n Closes the session.\n '''\n session_services.close_session(self)\n \n def active(self, client_request):\n '''\n Activates the session. 
Sets this session to current thread.\n '''\n \n self._set_client_request(client_request)\n thread = current_thread()\n thread.__LOCALE__ = client_request.context.get('__LOCALE__')\n session_services.active_session(self)\n \n def _set_client_request(self, client_request):\n '''\n Sets call context to session.\n '''\n \n if client_request.context is None:\n client_request.context = {}\n self._client_request = copy.deepcopy(client_request)\n \n def get_call_context(self):\n '''\n Returns call context.\n \n @return {}\n '''\n \n return self._client_request.context\n\n def get_internal_context(self):\n '''\n Retunrs internal system context for the current call\n\n @rtype: dict\n @return: internal context dictionary\n '''\n\n if not hasattr(self._client_request, 'internal_context') or \\\n self._client_request.internal_context is None:\n self._client_request.internal_context = {}\n\n return self._client_request.internal_context\n \n def get_client_request(self):\n '''\n Returns current client request.\n \n @rtype: ClientRequest\n @return: client request\n '''\n \n return self._client_request\n\n def get_ticket(self):\n '''\n Returns session ID.\n \n @return: str\n '''\n \n return self._ticket\n \n def get_id(self):\n '''\n Returns session ID.\n \n @return: int\n '''\n \n return self._id\n \n\n def get_user(self):\n '''\n Returns the user which creates this session.\n \n @return: user\n '''\n \n return security_services.get_user(self._user_id)\n \n def get_user_id(self):\n '''\n Returns the user which creates this session.\n \n @return: user\n '''\n \n return self._user_id\n\n def update(self):\n '''\n Updates session.\n '''\n \n session_services.update_session(self)\n \n def cleanup(self):\n '''\n Cleanups the session.\n '''\n \n session_services.cleanup_session(self)\n \n def get_state(self):\n '''\n Returns the session state.\n \n @return: str\n '''\n \n return self._state\n \n def set_state(self, state):\n '''\n Returns the session state.\n \n @return: str\n '''\n \n self._state = state\n self.update()\n\n def get_creation_date(self):\n '''\n Returns the session creation date.\n \n @return: \n '''\n \n return time.ctime(self._create_date)\n \n def get_context(self):\n '''\n Returns session context.\n \n @return: SessionContext\n '''\n \n return self._context \n \n def __str__(self):\n return \"%s[%s]\" % (self.__class__.__name__, self.get_ticket())\n \n def __repr__(self):\n return \"%s[%s]\" % (self.__class__.__name__, self.get_ticket())\n\n def is_expired(self):\n \"\"\"\n If session is expired, returns True.\n\n @return: Is expired\n @rtype: bool\n \"\"\"\n\n if self._lifetime is not None and self._lifetime > 0:\n # 300 seconds waite is the tolerance !\n # The unit of lifetime is millisecond\n if (time.time() - self._create_date) * 1000 > self._lifetime + 300000:\n return True\n\n return False\n",
"step-ids": [
18,
21,
25,
28,
31
]
}
|
[
18,
21,
25,
28,
31
] |
<|reserved_special_token_0|>
def arribo():
global reloj
global tiempoUltEvento
global estadoServ
global tiempoServicioTotal
global areaQ
global numCliEnCola
global cola
global tiempoLibre
global completaronDemora
listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)
if estadoServ == 0:
estadoServ = 1
tiempoLibre += reloj - tiempoUltEvento
listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)
completaronDemora += 1
else:
tiempoServicioTotal += reloj - tiempoUltEvento
listaUsoServidores.append(tiempoServicioTotal / reloj)
areaQ += numCliEnCola * (reloj - tiempoUltEvento)
numCliEnCola += 1
cola.append(reloj)
print(cola)
def partida():
global numCliEnCola
global tiempoServicioTotal
global areaQ
global demoraAcumulada
global completaronDemora
global estadoServ
global listaUsoServidores
if numCliEnCola > 0:
listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)
demoraAcumulada += reloj - cola[len(cola) - 1]
completaronDemora += 1
tiempoServicioTotal += reloj - tiempoUltEvento
listaUsoServidores.append(tiempoServicioTotal / reloj)
areaQ += numCliEnCola * (reloj - tiempoUltEvento)
numCliEnCola -= 1
cola.pop()
print(cola)
else:
estadoServ = 0
tiempoServicioTotal += reloj - tiempoUltEvento
listaEventos[1] = 9999999.0
def generarHisotgrama(lista, tiempototal, reloj):
utilizacionProm = tiempototal / reloj
plt.title('Utilizacion promedio del servidor')
plt.plot(lista)
plt.xlabel('tiempo')
plt.ylabel('Utilizacion promedio')
plt.axhline(utilizacionProm, color='k', ls='dotted', xmax=1)
plt.ylim(0, 1)
plt.xlim(0, len(lista))
plt.show()
def medidasDesempeño():
global listaUsoServidores
global reloj
global tiempoLibre
global areaQ
global tiempoServicioTotal
global demoraAcumulada
global completaronDemora
print('Medidas de desempeño de la simulación: ')
print('TIEMPO LIBRE DEL SERVIDOR %s' % tiempoLibre)
print('PORCENTAJE DE TIEMPO LIBRE %s' % (tiempoLibre / reloj))
print()
print('El reloj quedo en ', reloj)
var1 = areaQ / reloj
print('Nro promedio de cli en cola:', var1)
var2 = tiempoServicioTotal / reloj
print('Utilización promedio de los servidores:', var2)
var3 = demoraAcumulada / completaronDemora
print('Demora promedio por cliente:', var3)
generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)
def generarTiempoExponencial(media):
return -(1 / media) * math.log(random.random())
def nuevoEvento():
global reloj
global proximoEvento
global listaEventos
if listaEventos[0] <= listaEventos[1]:
reloj = listaEventos[0]
proximoEvento = 'ARRIBO'
else:
reloj = listaEventos[1]
proximoEvento = 'PARTIDA'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def arribo():
global reloj
global tiempoUltEvento
global estadoServ
global tiempoServicioTotal
global areaQ
global numCliEnCola
global cola
global tiempoLibre
global completaronDemora
listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)
if estadoServ == 0:
estadoServ = 1
tiempoLibre += reloj - tiempoUltEvento
listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)
completaronDemora += 1
else:
tiempoServicioTotal += reloj - tiempoUltEvento
listaUsoServidores.append(tiempoServicioTotal / reloj)
areaQ += numCliEnCola * (reloj - tiempoUltEvento)
numCliEnCola += 1
cola.append(reloj)
print(cola)
def partida():
global numCliEnCola
global tiempoServicioTotal
global areaQ
global demoraAcumulada
global completaronDemora
global estadoServ
global listaUsoServidores
if numCliEnCola > 0:
listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)
demoraAcumulada += reloj - cola[len(cola) - 1]
completaronDemora += 1
tiempoServicioTotal += reloj - tiempoUltEvento
listaUsoServidores.append(tiempoServicioTotal / reloj)
areaQ += numCliEnCola * (reloj - tiempoUltEvento)
numCliEnCola -= 1
cola.pop()
print(cola)
else:
estadoServ = 0
tiempoServicioTotal += reloj - tiempoUltEvento
listaEventos[1] = 9999999.0
def generarHisotgrama(lista, tiempototal, reloj):
utilizacionProm = tiempototal / reloj
plt.title('Utilizacion promedio del servidor')
plt.plot(lista)
plt.xlabel('tiempo')
plt.ylabel('Utilizacion promedio')
plt.axhline(utilizacionProm, color='k', ls='dotted', xmax=1)
plt.ylim(0, 1)
plt.xlim(0, len(lista))
plt.show()
def medidasDesempeño():
global listaUsoServidores
global reloj
global tiempoLibre
global areaQ
global tiempoServicioTotal
global demoraAcumulada
global completaronDemora
print('Medidas de desempeño de la simulación: ')
print('TIEMPO LIBRE DEL SERVIDOR %s' % tiempoLibre)
print('PORCENTAJE DE TIEMPO LIBRE %s' % (tiempoLibre / reloj))
print()
print('El reloj quedo en ', reloj)
var1 = areaQ / reloj
print('Nro promedio de cli en cola:', var1)
var2 = tiempoServicioTotal / reloj
print('Utilización promedio de los servidores:', var2)
var3 = demoraAcumulada / completaronDemora
print('Demora promedio por cliente:', var3)
generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)
def generarTiempoExponencial(media):
return -(1 / media) * math.log(random.random())
def nuevoEvento():
global reloj
global proximoEvento
global listaEventos
if listaEventos[0] <= listaEventos[1]:
reloj = listaEventos[0]
proximoEvento = 'ARRIBO'
else:
reloj = listaEventos[1]
proximoEvento = 'PARTIDA'
<|reserved_special_token_0|>
listaEventos.append(generarTiempoExponencial(tiempoEntreArribos))
listaEventos.append(9999999.0)
while True:
nuevoEvento()
if proximoEvento == 'ARRIBO':
arribo()
else:
partida()
tiempoUltEvento = reloj
if reloj >= 1000:
break
medidasDesempeño()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def arribo():
global reloj
global tiempoUltEvento
global estadoServ
global tiempoServicioTotal
global areaQ
global numCliEnCola
global cola
global tiempoLibre
global completaronDemora
listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)
if estadoServ == 0:
estadoServ = 1
tiempoLibre += reloj - tiempoUltEvento
listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)
completaronDemora += 1
else:
tiempoServicioTotal += reloj - tiempoUltEvento
listaUsoServidores.append(tiempoServicioTotal / reloj)
areaQ += numCliEnCola * (reloj - tiempoUltEvento)
numCliEnCola += 1
cola.append(reloj)
print(cola)
def partida():
global numCliEnCola
global tiempoServicioTotal
global areaQ
global demoraAcumulada
global completaronDemora
global estadoServ
global listaUsoServidores
if numCliEnCola > 0:
listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)
demoraAcumulada += reloj - cola[len(cola) - 1]
completaronDemora += 1
tiempoServicioTotal += reloj - tiempoUltEvento
listaUsoServidores.append(tiempoServicioTotal / reloj)
areaQ += numCliEnCola * (reloj - tiempoUltEvento)
numCliEnCola -= 1
cola.pop()
print(cola)
else:
estadoServ = 0
tiempoServicioTotal += reloj - tiempoUltEvento
listaEventos[1] = 9999999.0
def generarHisotgrama(lista, tiempototal, reloj):
utilizacionProm = tiempototal / reloj
plt.title('Utilizacion promedio del servidor')
plt.plot(lista)
plt.xlabel('tiempo')
plt.ylabel('Utilizacion promedio')
plt.axhline(utilizacionProm, color='k', ls='dotted', xmax=1)
plt.ylim(0, 1)
plt.xlim(0, len(lista))
plt.show()
def medidasDesempeño():
global listaUsoServidores
global reloj
global tiempoLibre
global areaQ
global tiempoServicioTotal
global demoraAcumulada
global completaronDemora
print('Medidas de desempeño de la simulación: ')
print('TIEMPO LIBRE DEL SERVIDOR %s' % tiempoLibre)
print('PORCENTAJE DE TIEMPO LIBRE %s' % (tiempoLibre / reloj))
print()
print('El reloj quedo en ', reloj)
var1 = areaQ / reloj
print('Nro promedio de cli en cola:', var1)
var2 = tiempoServicioTotal / reloj
print('Utilización promedio de los servidores:', var2)
var3 = demoraAcumulada / completaronDemora
print('Demora promedio por cliente:', var3)
generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)
def generarTiempoExponencial(media):
return -(1 / media) * math.log(random.random())
def nuevoEvento():
global reloj
global proximoEvento
global listaEventos
if listaEventos[0] <= listaEventos[1]:
reloj = listaEventos[0]
proximoEvento = 'ARRIBO'
else:
reloj = listaEventos[1]
proximoEvento = 'PARTIDA'
tiempoEntreArribos = 7
tiempoDeServicio = 9
reloj = 0.0
estadoServ = 0
tiempoServicioTotal = 0.0
tiempoLibre = 0.0
demoraAcumulada = 0.0
proximoEvento = ''
listaEventos = []
cola = []
numCliEnCola = 0
areaQ = 0.0
tiempoUltEvento = 0.0
completaronDemora = 0
listaUsoServidores = []
listaEventos.append(generarTiempoExponencial(tiempoEntreArribos))
listaEventos.append(9999999.0)
while True:
nuevoEvento()
if proximoEvento == 'ARRIBO':
arribo()
else:
partida()
tiempoUltEvento = reloj
if reloj >= 1000:
break
medidasDesempeño()
<|reserved_special_token_1|>
import numpy as np
import random
import math
import matplotlib.pyplot as plt
def arribo():
global reloj
global tiempoUltEvento
global estadoServ
global tiempoServicioTotal
global areaQ
global numCliEnCola
global cola
global tiempoLibre
global completaronDemora
listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)
if estadoServ == 0:
estadoServ = 1
tiempoLibre += reloj - tiempoUltEvento
listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)
completaronDemora += 1
else:
tiempoServicioTotal += reloj - tiempoUltEvento
listaUsoServidores.append(tiempoServicioTotal / reloj)
areaQ += numCliEnCola * (reloj - tiempoUltEvento)
numCliEnCola += 1
cola.append(reloj)
print(cola)
def partida():
global numCliEnCola
global tiempoServicioTotal
global areaQ
global demoraAcumulada
global completaronDemora
global estadoServ
global listaUsoServidores
if numCliEnCola > 0:
listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)
demoraAcumulada += reloj - cola[len(cola) - 1]
completaronDemora += 1
tiempoServicioTotal += reloj - tiempoUltEvento
listaUsoServidores.append(tiempoServicioTotal / reloj)
areaQ += numCliEnCola * (reloj - tiempoUltEvento)
numCliEnCola -= 1
cola.pop()
print(cola)
else:
estadoServ = 0
tiempoServicioTotal += reloj - tiempoUltEvento
listaEventos[1] = 9999999.0
def generarHisotgrama(lista, tiempototal, reloj):
utilizacionProm = tiempototal / reloj
plt.title('Utilizacion promedio del servidor')
plt.plot(lista)
plt.xlabel('tiempo')
plt.ylabel('Utilizacion promedio')
plt.axhline(utilizacionProm, color='k', ls='dotted', xmax=1)
plt.ylim(0, 1)
plt.xlim(0, len(lista))
plt.show()
def medidasDesempeño():
global listaUsoServidores
global reloj
global tiempoLibre
global areaQ
global tiempoServicioTotal
global demoraAcumulada
global completaronDemora
print('Medidas de desempeño de la simulación: ')
print('TIEMPO LIBRE DEL SERVIDOR %s' % tiempoLibre)
print('PORCENTAJE DE TIEMPO LIBRE %s' % (tiempoLibre / reloj))
print()
print('El reloj quedo en ', reloj)
var1 = areaQ / reloj
print('Nro promedio de cli en cola:', var1)
var2 = tiempoServicioTotal / reloj
print('Utilización promedio de los servidores:', var2)
var3 = demoraAcumulada / completaronDemora
print('Demora promedio por cliente:', var3)
generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)
def generarTiempoExponencial(media):
return -(1 / media) * math.log(random.random())
def nuevoEvento():
global reloj
global proximoEvento
global listaEventos
if listaEventos[0] <= listaEventos[1]:
reloj = listaEventos[0]
proximoEvento = 'ARRIBO'
else:
reloj = listaEventos[1]
proximoEvento = 'PARTIDA'
tiempoEntreArribos = 7
tiempoDeServicio = 9
reloj = 0.0
estadoServ = 0
tiempoServicioTotal = 0.0
tiempoLibre = 0.0
demoraAcumulada = 0.0
proximoEvento = ''
listaEventos = []
cola = []
numCliEnCola = 0
areaQ = 0.0
tiempoUltEvento = 0.0
completaronDemora = 0
listaUsoServidores = []
listaEventos.append(generarTiempoExponencial(tiempoEntreArribos))
listaEventos.append(9999999.0)
while True:
nuevoEvento()
if proximoEvento == 'ARRIBO':
arribo()
else:
partida()
tiempoUltEvento = reloj
if reloj >= 1000:
break
medidasDesempeño()
<|reserved_special_token_1|>
# M/M/1 queueing system simulator.
#
# Response variables:
# - Average delay per customer
# - Average number of customers in queue
# - Average server utilization
#
# Functions:
# arribo()
# partida()
# nuevoEvento()
# medidasDesempeño()
# generarTiempoExponencial(t)
# generarHisotgrama(lista)
import numpy as np
import random
import math
import matplotlib.pyplot as plt
def arribo():
global reloj
global tiempoUltEvento
global estadoServ
global tiempoServicioTotal
global areaQ
global numCliEnCola
global cola
global tiempoLibre
global completaronDemora
    # Schedule the next arrival
listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)
if estadoServ == 0:
        # Server switches to 1, busy
estadoServ = 1
tiempoLibre += reloj - tiempoUltEvento
        # Schedule the next departure event
listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)
        # Update the number of customers that completed their delay
completaronDemora += 1
else:
        # Accumulate the service time
tiempoServicioTotal += (reloj - tiempoUltEvento)
        # Record the server utilization at each t, used later to plot how the average utilization is reached.
listaUsoServidores.append(tiempoServicioTotal / reloj)
areaQ += (numCliEnCola * (reloj - tiempoUltEvento))
numCliEnCola += 1
        # Add the customer to the queue
cola.append(reloj)
print(cola)
def partida():
global numCliEnCola
global tiempoServicioTotal
global areaQ
global demoraAcumulada
global completaronDemora
global estadoServ
global listaUsoServidores
if numCliEnCola > 0:
        # Next departure
listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)
        # Accumulate the total delay as the current clock value
        # minus the clock value when the customer entered the queue
demoraAcumulada += reloj - cola[len(cola)-1]
        # Update the counter of customers that completed their delay
completaronDemora += 1
        # Accumulate the service time
tiempoServicioTotal += (reloj - tiempoUltEvento)
        # Record the server utilization at each t, used later to plot how the average utilization is reached.
listaUsoServidores.append(tiempoServicioTotal/reloj)
        # Compute the area under Q(t) for the previous period (clock - time of last event)
areaQ += (numCliEnCola * (reloj - tiempoUltEvento))
numCliEnCola -= 1
        # Remove the last customer to arrive
cola.pop()
print(cola)
else:
        # With no customers in queue, set the server state to "Idle"
estadoServ = 0
        # Accumulate the service time
tiempoServicioTotal += (reloj - tiempoUltEvento)
        # Record the server utilization at each t, used later to plot how the average utilization is reached.
        # listaUsoServidores.append(tiempoServicioTotal / reloj)
listaEventos[1] = 9999999.0
def generarHisotgrama(lista, tiempototal, reloj):
utilizacionProm=tiempototal/reloj
plt.title('Utilizacion promedio del servidor')
plt.plot(lista)
plt.xlabel("tiempo")
plt.ylabel("Utilizacion promedio")
    plt.axhline(utilizacionProm, color='k', ls="dotted", xmax=1) # Constant horizontal reference line
    plt.ylim(0, 1) # Limits for the Y axis
    plt.xlim(0, len(lista)) # Limits for the X axis
plt.show()
def medidasDesempeño():
global listaUsoServidores
global reloj
global tiempoLibre
global areaQ
global tiempoServicioTotal
global demoraAcumulada
global completaronDemora
print("Medidas de desempeño de la simulación: ")
print("TIEMPO LIBRE DEL SERVIDOR %s" % tiempoLibre)
print("PORCENTAJE DE TIEMPO LIBRE %s" % (tiempoLibre / reloj))
print()
print("El reloj quedo en ", reloj)
var1 = areaQ / reloj
print("Nro promedio de cli en cola:", var1)
var2 = tiempoServicioTotal / reloj
print("Utilización promedio de los servidores:", var2)
var3 = demoraAcumulada / completaronDemora
print("Demora promedio por cliente:", var3)
generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)
def generarTiempoExponencial(media):
# return np.random.exponential(media)
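    # Inverse-transform sampling: -ln(U) / media has an Exponential distribution with
    # rate `media` (so its mean is 1/media); the commented-out np.random.exponential(media)
    # above would instead treat `media` as the mean.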
return -(1/media) * math.log(random.random())
def nuevoEvento():
global reloj
global proximoEvento
global listaEventos
if listaEventos[0] <= listaEventos[1]:
reloj = listaEventos[0]
proximoEvento = "ARRIBO"
else:
reloj = listaEventos[1]
proximoEvento = "PARTIDA"
# Start of the main program
# Arrival and service time parameters of the model:
tiempoEntreArribos = 7
tiempoDeServicio = 9
# Initialization of variables
reloj = 0.0
estadoServ = 0
tiempoServicioTotal = 0.0
tiempoLibre = 0.0
demoraAcumulada = 0.0
proximoEvento = ""
listaEventos = []
cola = []
numCliEnCola = 0
areaQ = 0.0
tiempoUltEvento = 0.0
completaronDemora = 0
listaUsoServidores = []
# Time of the first event (arrival)
listaEventos.append(generarTiempoExponencial(tiempoEntreArribos))
#
# Infinity, since there are no customers in the system yet
listaEventos.append(9999999.0)
while True:
nuevoEvento()
    # Call the routine corresponding to the event type
if proximoEvento == "ARRIBO":
arribo()
else:
partida()
tiempoUltEvento = reloj
if reloj >= 1000:
break
medidasDesempeño()
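# A minimal cross-check sketch, assuming the parameters behave as rates (consistent with
# generarTiempoExponencial, whose samples have mean 1/media): the simulated metrics printed
# above can be compared with the analytic M/M/1 formulas below. Illustrative only; nothing
# in the script above calls this function.
def comparar_con_mm1_analitico(lam=7.0, mu=9.0):
    rho = lam / mu                # server utilization (requires lam < mu)
    Lq = rho ** 2 / (1 - rho)     # average number of customers in queue
    Wq = rho / (mu - lam)         # average delay in queue per customer
    return {'utilizacion': rho, 'clientes_en_cola': Lq, 'demora': Wq}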
|
flexible
|
{
"blob_id": "62cc731982846f08b3f3caace5df1bfafd421869",
"index": 1701,
"step-1": "<mask token>\n\n\ndef arribo():\n global reloj\n global tiempoUltEvento\n global estadoServ\n global tiempoServicioTotal\n global areaQ\n global numCliEnCola\n global cola\n global tiempoLibre\n global completaronDemora\n listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)\n if estadoServ == 0:\n estadoServ = 1\n tiempoLibre += reloj - tiempoUltEvento\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n completaronDemora += 1\n else:\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola += 1\n cola.append(reloj)\n print(cola)\n\n\ndef partida():\n global numCliEnCola\n global tiempoServicioTotal\n global areaQ\n global demoraAcumulada\n global completaronDemora\n global estadoServ\n global listaUsoServidores\n if numCliEnCola > 0:\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n demoraAcumulada += reloj - cola[len(cola) - 1]\n completaronDemora += 1\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola -= 1\n cola.pop()\n print(cola)\n else:\n estadoServ = 0\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaEventos[1] = 9999999.0\n\n\ndef generarHisotgrama(lista, tiempototal, reloj):\n utilizacionProm = tiempototal / reloj\n plt.title('Utilizacion promedio del servidor')\n plt.plot(lista)\n plt.xlabel('tiempo')\n plt.ylabel('Utilizacion promedio')\n plt.axhline(utilizacionProm, color='k', ls='dotted', xmax=1)\n plt.ylim(0, 1)\n plt.xlim(0, len(lista))\n plt.show()\n\n\ndef medidasDesempeño():\n global listaUsoServidores\n global reloj\n global tiempoLibre\n global areaQ\n global tiempoServicioTotal\n global demoraAcumulada\n global completaronDemora\n print('Medidas de desempeño de la simulación: ')\n print('TIEMPO LIBRE DEL SERVIDOR %s' % tiempoLibre)\n print('PORCENTAJE DE TIEMPO LIBRE %s' % (tiempoLibre / reloj))\n print()\n print('El reloj quedo en ', reloj)\n var1 = areaQ / reloj\n print('Nro promedio de cli en cola:', var1)\n var2 = tiempoServicioTotal / reloj\n print('Utilización promedio de los servidores:', var2)\n var3 = demoraAcumulada / completaronDemora\n print('Demora promedio por cliente:', var3)\n generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)\n\n\ndef generarTiempoExponencial(media):\n return -(1 / media) * math.log(random.random())\n\n\ndef nuevoEvento():\n global reloj\n global proximoEvento\n global listaEventos\n if listaEventos[0] <= listaEventos[1]:\n reloj = listaEventos[0]\n proximoEvento = 'ARRIBO'\n else:\n reloj = listaEventos[1]\n proximoEvento = 'PARTIDA'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef arribo():\n global reloj\n global tiempoUltEvento\n global estadoServ\n global tiempoServicioTotal\n global areaQ\n global numCliEnCola\n global cola\n global tiempoLibre\n global completaronDemora\n listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)\n if estadoServ == 0:\n estadoServ = 1\n tiempoLibre += reloj - tiempoUltEvento\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n completaronDemora += 1\n else:\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola += 1\n cola.append(reloj)\n print(cola)\n\n\ndef partida():\n global numCliEnCola\n global tiempoServicioTotal\n global areaQ\n global demoraAcumulada\n global completaronDemora\n global estadoServ\n global listaUsoServidores\n if numCliEnCola > 0:\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n demoraAcumulada += reloj - cola[len(cola) - 1]\n completaronDemora += 1\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola -= 1\n cola.pop()\n print(cola)\n else:\n estadoServ = 0\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaEventos[1] = 9999999.0\n\n\ndef generarHisotgrama(lista, tiempototal, reloj):\n utilizacionProm = tiempototal / reloj\n plt.title('Utilizacion promedio del servidor')\n plt.plot(lista)\n plt.xlabel('tiempo')\n plt.ylabel('Utilizacion promedio')\n plt.axhline(utilizacionProm, color='k', ls='dotted', xmax=1)\n plt.ylim(0, 1)\n plt.xlim(0, len(lista))\n plt.show()\n\n\ndef medidasDesempeño():\n global listaUsoServidores\n global reloj\n global tiempoLibre\n global areaQ\n global tiempoServicioTotal\n global demoraAcumulada\n global completaronDemora\n print('Medidas de desempeño de la simulación: ')\n print('TIEMPO LIBRE DEL SERVIDOR %s' % tiempoLibre)\n print('PORCENTAJE DE TIEMPO LIBRE %s' % (tiempoLibre / reloj))\n print()\n print('El reloj quedo en ', reloj)\n var1 = areaQ / reloj\n print('Nro promedio de cli en cola:', var1)\n var2 = tiempoServicioTotal / reloj\n print('Utilización promedio de los servidores:', var2)\n var3 = demoraAcumulada / completaronDemora\n print('Demora promedio por cliente:', var3)\n generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)\n\n\ndef generarTiempoExponencial(media):\n return -(1 / media) * math.log(random.random())\n\n\ndef nuevoEvento():\n global reloj\n global proximoEvento\n global listaEventos\n if listaEventos[0] <= listaEventos[1]:\n reloj = listaEventos[0]\n proximoEvento = 'ARRIBO'\n else:\n reloj = listaEventos[1]\n proximoEvento = 'PARTIDA'\n\n\n<mask token>\nlistaEventos.append(generarTiempoExponencial(tiempoEntreArribos))\nlistaEventos.append(9999999.0)\nwhile True:\n nuevoEvento()\n if proximoEvento == 'ARRIBO':\n arribo()\n else:\n partida()\n tiempoUltEvento = reloj\n if reloj >= 1000:\n break\nmedidasDesempeño()\n",
"step-3": "<mask token>\n\n\ndef arribo():\n global reloj\n global tiempoUltEvento\n global estadoServ\n global tiempoServicioTotal\n global areaQ\n global numCliEnCola\n global cola\n global tiempoLibre\n global completaronDemora\n listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)\n if estadoServ == 0:\n estadoServ = 1\n tiempoLibre += reloj - tiempoUltEvento\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n completaronDemora += 1\n else:\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola += 1\n cola.append(reloj)\n print(cola)\n\n\ndef partida():\n global numCliEnCola\n global tiempoServicioTotal\n global areaQ\n global demoraAcumulada\n global completaronDemora\n global estadoServ\n global listaUsoServidores\n if numCliEnCola > 0:\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n demoraAcumulada += reloj - cola[len(cola) - 1]\n completaronDemora += 1\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola -= 1\n cola.pop()\n print(cola)\n else:\n estadoServ = 0\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaEventos[1] = 9999999.0\n\n\ndef generarHisotgrama(lista, tiempototal, reloj):\n utilizacionProm = tiempototal / reloj\n plt.title('Utilizacion promedio del servidor')\n plt.plot(lista)\n plt.xlabel('tiempo')\n plt.ylabel('Utilizacion promedio')\n plt.axhline(utilizacionProm, color='k', ls='dotted', xmax=1)\n plt.ylim(0, 1)\n plt.xlim(0, len(lista))\n plt.show()\n\n\ndef medidasDesempeño():\n global listaUsoServidores\n global reloj\n global tiempoLibre\n global areaQ\n global tiempoServicioTotal\n global demoraAcumulada\n global completaronDemora\n print('Medidas de desempeño de la simulación: ')\n print('TIEMPO LIBRE DEL SERVIDOR %s' % tiempoLibre)\n print('PORCENTAJE DE TIEMPO LIBRE %s' % (tiempoLibre / reloj))\n print()\n print('El reloj quedo en ', reloj)\n var1 = areaQ / reloj\n print('Nro promedio de cli en cola:', var1)\n var2 = tiempoServicioTotal / reloj\n print('Utilización promedio de los servidores:', var2)\n var3 = demoraAcumulada / completaronDemora\n print('Demora promedio por cliente:', var3)\n generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)\n\n\ndef generarTiempoExponencial(media):\n return -(1 / media) * math.log(random.random())\n\n\ndef nuevoEvento():\n global reloj\n global proximoEvento\n global listaEventos\n if listaEventos[0] <= listaEventos[1]:\n reloj = listaEventos[0]\n proximoEvento = 'ARRIBO'\n else:\n reloj = listaEventos[1]\n proximoEvento = 'PARTIDA'\n\n\ntiempoEntreArribos = 7\ntiempoDeServicio = 9\nreloj = 0.0\nestadoServ = 0\ntiempoServicioTotal = 0.0\ntiempoLibre = 0.0\ndemoraAcumulada = 0.0\nproximoEvento = ''\nlistaEventos = []\ncola = []\nnumCliEnCola = 0\nareaQ = 0.0\ntiempoUltEvento = 0.0\ncompletaronDemora = 0\nlistaUsoServidores = []\nlistaEventos.append(generarTiempoExponencial(tiempoEntreArribos))\nlistaEventos.append(9999999.0)\nwhile True:\n nuevoEvento()\n if proximoEvento == 'ARRIBO':\n arribo()\n else:\n partida()\n tiempoUltEvento = reloj\n if reloj >= 1000:\n break\nmedidasDesempeño()\n",
"step-4": "import numpy as np\nimport random\nimport math\nimport matplotlib.pyplot as plt\n\n\ndef arribo():\n global reloj\n global tiempoUltEvento\n global estadoServ\n global tiempoServicioTotal\n global areaQ\n global numCliEnCola\n global cola\n global tiempoLibre\n global completaronDemora\n listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)\n if estadoServ == 0:\n estadoServ = 1\n tiempoLibre += reloj - tiempoUltEvento\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n completaronDemora += 1\n else:\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola += 1\n cola.append(reloj)\n print(cola)\n\n\ndef partida():\n global numCliEnCola\n global tiempoServicioTotal\n global areaQ\n global demoraAcumulada\n global completaronDemora\n global estadoServ\n global listaUsoServidores\n if numCliEnCola > 0:\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\n demoraAcumulada += reloj - cola[len(cola) - 1]\n completaronDemora += 1\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaUsoServidores.append(tiempoServicioTotal / reloj)\n areaQ += numCliEnCola * (reloj - tiempoUltEvento)\n numCliEnCola -= 1\n cola.pop()\n print(cola)\n else:\n estadoServ = 0\n tiempoServicioTotal += reloj - tiempoUltEvento\n listaEventos[1] = 9999999.0\n\n\ndef generarHisotgrama(lista, tiempototal, reloj):\n utilizacionProm = tiempototal / reloj\n plt.title('Utilizacion promedio del servidor')\n plt.plot(lista)\n plt.xlabel('tiempo')\n plt.ylabel('Utilizacion promedio')\n plt.axhline(utilizacionProm, color='k', ls='dotted', xmax=1)\n plt.ylim(0, 1)\n plt.xlim(0, len(lista))\n plt.show()\n\n\ndef medidasDesempeño():\n global listaUsoServidores\n global reloj\n global tiempoLibre\n global areaQ\n global tiempoServicioTotal\n global demoraAcumulada\n global completaronDemora\n print('Medidas de desempeño de la simulación: ')\n print('TIEMPO LIBRE DEL SERVIDOR %s' % tiempoLibre)\n print('PORCENTAJE DE TIEMPO LIBRE %s' % (tiempoLibre / reloj))\n print()\n print('El reloj quedo en ', reloj)\n var1 = areaQ / reloj\n print('Nro promedio de cli en cola:', var1)\n var2 = tiempoServicioTotal / reloj\n print('Utilización promedio de los servidores:', var2)\n var3 = demoraAcumulada / completaronDemora\n print('Demora promedio por cliente:', var3)\n generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)\n\n\ndef generarTiempoExponencial(media):\n return -(1 / media) * math.log(random.random())\n\n\ndef nuevoEvento():\n global reloj\n global proximoEvento\n global listaEventos\n if listaEventos[0] <= listaEventos[1]:\n reloj = listaEventos[0]\n proximoEvento = 'ARRIBO'\n else:\n reloj = listaEventos[1]\n proximoEvento = 'PARTIDA'\n\n\ntiempoEntreArribos = 7\ntiempoDeServicio = 9\nreloj = 0.0\nestadoServ = 0\ntiempoServicioTotal = 0.0\ntiempoLibre = 0.0\ndemoraAcumulada = 0.0\nproximoEvento = ''\nlistaEventos = []\ncola = []\nnumCliEnCola = 0\nareaQ = 0.0\ntiempoUltEvento = 0.0\ncompletaronDemora = 0\nlistaUsoServidores = []\nlistaEventos.append(generarTiempoExponencial(tiempoEntreArribos))\nlistaEventos.append(9999999.0)\nwhile True:\n nuevoEvento()\n if proximoEvento == 'ARRIBO':\n arribo()\n else:\n partida()\n tiempoUltEvento = reloj\n if reloj >= 1000:\n break\nmedidasDesempeño()\n",
"step-5": "# Simulador de sistema M/M/1.\r\n#\r\n# Variables de respuesta:\r\n# - Demora promedio por cliente\r\n# - Número promedio de clientes en cola\r\n# - Utilización promedio de cliente\r\n#\r\n# Funciones:\r\n# arribo()\r\n# partida()\r\n# nuevoEvento()\r\n# medidasDesempeño()\r\n# generarTiempoExponencial(t)\r\n# generarHisotgrama(lista)\r\n\r\nimport numpy as np\r\nimport random\r\nimport math\r\nimport matplotlib.pyplot as plt\r\n\r\ndef arribo():\r\n\r\n global reloj\r\n global tiempoUltEvento\r\n global estadoServ\r\n global tiempoServicioTotal\r\n global areaQ\r\n global numCliEnCola\r\n global cola\r\n global tiempoLibre\r\n global completaronDemora\r\n\r\n # Siguiente arribo\r\n listaEventos[0] = reloj + generarTiempoExponencial(tiempoEntreArribos)\r\n\r\n\r\n if estadoServ == 0:\r\n # Servidor pasa a 1, ocupado\r\n estadoServ = 1\r\n tiempoLibre += reloj - tiempoUltEvento\r\n # Programo el próximo evento partida\r\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\r\n\r\n # Actualizo la cantidad de clientes que completaron la demora\r\n completaronDemora += 1\r\n\r\n else:\r\n # Acumulo el tiempo de servicio\r\n tiempoServicioTotal += (reloj - tiempoUltEvento)\r\n\r\n # Acumulo la utilizacion de cada servidor en t para hacer la grafica de como se llega al promedio de utilización.\r\n listaUsoServidores.append(tiempoServicioTotal / reloj)\r\n\r\n areaQ += (numCliEnCola * (reloj - tiempoUltEvento))\r\n numCliEnCola += 1\r\n\r\n # Agrego el cliente a la cola\r\n cola.append(reloj)\r\n print(cola)\r\n\r\ndef partida():\r\n\r\n global numCliEnCola\r\n global tiempoServicioTotal\r\n global areaQ\r\n global demoraAcumulada\r\n global completaronDemora\r\n global estadoServ\r\n global listaUsoServidores\r\n\r\n if numCliEnCola > 0:\r\n\r\n\r\n\r\n # Proxima partida\r\n listaEventos[1] = reloj + generarTiempoExponencial(tiempoDeServicio)\r\n # Acumulo la demora acumulada como el valor actual del reloj\r\n # menos el valor del reloj cuando el cliente ingresó a la cola\r\n\r\n demoraAcumulada += reloj - cola[len(cola)-1]\r\n\r\n # Actualizo el contador de clientes que completaron la demora\r\n completaronDemora += 1\r\n\r\n # Acumulo el tiempo de servicio\r\n tiempoServicioTotal += (reloj - tiempoUltEvento)\r\n\r\n # Acumulo la utilizacion de cada servidor en t para hacer la grafica de como se llega al promedio de utilización.\r\n listaUsoServidores.append(tiempoServicioTotal/reloj)\r\n\r\n # Calculo el Área bajo Q(t) del período anterior (Reloj - TiempoUltimoEvento)\r\n areaQ += (numCliEnCola * (reloj - tiempoUltEvento))\r\n numCliEnCola -= 1\r\n\r\n # Saco el ultimo en llegar\r\n cola.pop()\r\n print(cola)\r\n else:\r\n # Al no haber clientes en cola, establezco el estado del servidor en \"Desocupado\"\r\n estadoServ = 0\r\n # Acumulo el tiempo de servicio\r\n tiempoServicioTotal += (reloj - tiempoUltEvento)\r\n\r\n # Acumulo la utilizacion de cada servidor en t para hacer la grafica de como se llega al promedio de utilización.\r\n # listaUsoServidores.append(tiempoServicioTotal / reloj)\r\n listaEventos[1] = 9999999.0\r\n\r\n\r\ndef generarHisotgrama(lista, tiempototal, reloj):\r\n utilizacionProm=tiempototal/reloj\r\n plt.title('Utilizacion promedio del servidor')\r\n plt.plot(lista)\r\n plt.xlabel(\"tiempo\")\r\n plt.ylabel(\"Utilizacion promedio\")\r\n plt.axhline(utilizacionProm, color='k', ls=\"dotted\", xmax=1) # Comando para linea horizontal constante\r\n plt.ylim(0, 1) # Limites para el eje Y\r\n plt.xlim(0, len(lista)) # Limites para el eje X\r\n 
plt.show()\r\n\r\n\r\ndef medidasDesempeño():\r\n global listaUsoServidores\r\n global reloj\r\n global tiempoLibre\r\n global areaQ\r\n global tiempoServicioTotal\r\n global demoraAcumulada\r\n global completaronDemora\r\n\r\n print(\"Medidas de desempeño de la simulación: \")\r\n print(\"TIEMPO LIBRE DEL SERVIDOR %s\" % tiempoLibre)\r\n print(\"PORCENTAJE DE TIEMPO LIBRE %s\" % (tiempoLibre / reloj))\r\n print()\r\n print(\"El reloj quedo en \", reloj)\r\n\r\n var1 = areaQ / reloj\r\n print(\"Nro promedio de cli en cola:\", var1)\r\n\r\n var2 = tiempoServicioTotal / reloj\r\n print(\"Utilización promedio de los servidores:\", var2)\r\n\r\n var3 = demoraAcumulada / completaronDemora\r\n print(\"Demora promedio por cliente:\", var3)\r\n generarHisotgrama(listaUsoServidores, tiempoServicioTotal, reloj)\r\n\r\ndef generarTiempoExponencial(media):\r\n # return np.random.exponential(media)\r\n return -(1/media) * math.log(random.random())\r\n\r\n\r\ndef nuevoEvento():\r\n\r\n global reloj\r\n global proximoEvento\r\n global listaEventos\r\n\r\n if listaEventos[0] <= listaEventos[1]:\r\n reloj = listaEventos[0]\r\n proximoEvento = \"ARRIBO\"\r\n else:\r\n reloj = listaEventos[1]\r\n proximoEvento = \"PARTIDA\"\r\n\r\n#Inicio del programa principal\r\n#Tiempo de arribo y servicio del modelo:\r\ntiempoEntreArribos = 7\r\ntiempoDeServicio = 9\r\n\r\n#Inicializacion de variables\r\nreloj = 0.0\r\nestadoServ = 0\r\ntiempoServicioTotal = 0.0\r\ntiempoLibre = 0.0\r\ndemoraAcumulada = 0.0\r\nproximoEvento = \"\"\r\nlistaEventos = []\r\ncola = []\r\nnumCliEnCola = 0\r\nareaQ = 0.0\r\ntiempoUltEvento = 0.0\r\ncompletaronDemora = 0\r\nlistaUsoServidores = []\r\n\r\n# Tiempo primer evento (arribo)\r\nlistaEventos.append(generarTiempoExponencial(tiempoEntreArribos))\r\n#\r\n# Infinito, ya que todavia no hay clientes en el sistema\r\nlistaEventos.append(9999999.0)\r\n\r\nwhile True:\r\n nuevoEvento()\r\n\r\n # Llamada a la rutina correspondiente en función del tipo de evento\r\n if proximoEvento == \"ARRIBO\":\r\n arribo()\r\n else:\r\n partida()\r\n\r\n tiempoUltEvento = reloj\r\n\r\n if reloj >= 1000:\r\n break\r\nmedidasDesempeño()\r\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
"""This is a collection of utilities for httpy and httpy applications.
"""
import cgi
import linecache
import mimetypes
import os
import stat
import sys
from Cookie import SimpleCookie
from StringIO import StringIO
from urllib import unquote
from httpy.Response import Response
def uri_to_fs(config, resource_uri_path, defaults=[], raw=False):
"""Map a requested URI to the filesystem.
Takes a TransactionConfig object, a URI path, and a list of filenames which
should be considered default resources.
The URI path is taken to be rooted literally in the filesystem root (which
could be a site root or an application root). If it points to a directory,
we look for a default resource if any are named. If it points to a file, we
make sure the file exists.
This method can raise the following Responses:
301 Moved Permanently
400 Bad Request
403 Forbidden
404 Not Found
If successful, we return the filesystem path to the particular resource.
"""
# Knit the requested URI onto the application root.
# =================================================
if config.app_fs_root == config.site_root:
_parts = resource_uri_path.lstrip('/').split('/')
else:
uri_below_app = resource_uri_path[len(config.app_uri_root):]
_parts = uri_below_app.lstrip('/').split('/')
_parts.insert(0, config.app_fs_root)
resource_fs_path = os.sep.join(_parts)
resource_fs_path = os.path.realpath(resource_fs_path)
if raw:
return resource_fs_path
if os.path.isdir(resource_fs_path):
# Process the request as a directory.
# ===================================
if not resource_uri_path.endswith('/'):
# redirect directory requests to trailing slash
new_location = '%s/' % resource_uri_path
response = Response(301)
response.headers['Location'] = new_location
log(98, "Redirecting to trailing slash: %s" % resource_uri_path)
raise response
log(98, "Looking for these defaults: %s" % str(defaults))
default = ''
for name in defaults:
_path = os.path.join(resource_fs_path, name)
if os.path.isfile(_path):
default = _path
break
resource_fs_path = default
if not default:
log(95, "No default resource in %s" % resource_uri_path)
raise Response(403)
else:
# Process the request as a file.
# ==============================
if not os.path.exists(resource_fs_path):
log(95, "Did not find %s at %s." % ( resource_uri_path
, resource_fs_path
))
raise Response(404)
return resource_fs_path
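# A hypothetical usage sketch (the bare-bones config stand-in and paths below are
# illustrative, not httpy API): resolve '/docs/' against a site rooted at /var/www/site,
# treating index.html as the default resource; 301/403/404 conditions surface as
# Response objects raised by uri_to_fs.
def _example_uri_to_fs():
    class _Config:                      # stand-in for a TransactionConfig
        site_root = '/var/www/site'
        app_fs_root = '/var/www/site'   # application rooted at the site root
        app_uri_root = '/'
    return uri_to_fs(_Config(), '/docs/', defaults=['index.html'])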
# Following are some parsers useful for dynamic applications.
#
# While httpy.Request keeps close to the HTTP layer, any dynamic application
# will need to comprehend application-specific information encoded in the
# Request. The functions below return representations of such information as
# objects from the standard library.
#
# function uses returns
# =========================================================
# parse_query uri['query'] cgi.FieldStorage
# parse_cookie message.get('Cookie') Cookie.SimpleCookie
# parse_post raw_body cgi.FieldStorage
#
#
# These functions are not used in httpy proper and are not unittested yet.
def parse_query(request):
"""Translate request's querystring into a cgi.FieldStorage.
"""
querystring = request.uri['query']
fp = StringIO(querystring)
headers = {}
headers['content-type'] = request.message.get('content-type')
headers['content-length'] = request.message.get('content-length')
environ = {}
environ['REQUEST_METHOD'] = request.method
boundary = request.message.get('boundary')
query = cgi.FieldStorage( fp = fp
, headers = headers
, outerboundary = boundary
, environ = environ
, keep_blank_values = True
, strict_parsing = False
)
return query
def parse_cookie(request):
"""Translate request's cookie into a Cookie.SimpleCookie.
"""
raw_cookie = request.message.get('Cookie','')
return SimpleCookie(raw_cookie)
def parse_post(request):
"""Translate request's body into a cgi.FieldStorage.
"""
fp = StringIO(request.raw_body)
headers = {}
headers['content-type'] = request.message.get('content-type')
headers['content-length'] = request.message.get('content-length')
environ = {}
environ['REQUEST_METHOD'] = request.method
boundary = request.message.get('boundary')
post = cgi.FieldStorage( fp = fp
, headers = headers
, outerboundary = boundary
, environ = environ
, keep_blank_values = True
, strict_parsing = False
)
return post
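# A hedged sketch of how the three parsers combine in a handler; `request` is whatever
# httpy Request object the transaction provides (only the attributes already used above
# are assumed).
def _example_parse_request(request):
    query = parse_query(request)            # cgi.FieldStorage built from uri['query']
    cookie = parse_cookie(request)           # Cookie.SimpleCookie from the Cookie header
    post = parse_post(request)               # cgi.FieldStorage built from the raw body
    name = query.getfirst('name', '')        # blank default if the field is absent
    session_id = cookie.get('session_id')    # a Morsel, or None when no such cookie
    return name, session_id, post.keys()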
# Chad's logging util.
def log(verbosity, message):
if int(os.environ.get("HTTPY_VERBOSITY", 0)) >= verbosity:
print "%d %s" % (verbosity, message)
import sys; sys.stdout.flush()
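# For example, with HTTPY_VERBOSITY=95 in the environment the log(95, ...) calls above
# are printed while the log(98, ...) calls stay silent; with the variable unset, nothing prints.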
# Steve's logging util.
from StringIO import StringIO
import new, threading
class dummy_outputer:
def __init__(self): pass
def write(self,*outputs): pass
def writeln(self,*outputs): pass
def __call__(self,*outputs): pass
def dump(self): pass
def pdump(self): pass
class outputer:
"""
This is an initial implementation of an outputer class that acts
like print but adds a couple of features:
1) verbosity
2) buffering
3) output to places other than stdout
Example usage:
>>> out = outputer(1)
>>> out.write('hey')
>>> out.v2('hey','you')
>>> out.v1('hey','you')
>>> out.pdump()
heyhey you
>>> out('ack')
>>> poo = out.dump()
>>> poo
'ack '
"""
def __init__(self,verbosity=0,vlevels=5,parentFirst=None,parentContents=None):
self.parentContents=parentContents
self.first=threading.Event()
self.parentFirst = parentFirst
self.contents=StringIO()
if not self.parentContents:
for i in range(vlevels-1):
v=i+1
if v<=verbosity:
v_outputer = outputer(parentFirst=self.first,parentContents=self.contents)
else:
v_outputer = dummy_outputer()
setattr(self,'v%s'%v,v_outputer)
def write(self,*outputs):
for output in outputs:
if self.parentContents:
self.parentContents.write(str(output))
self.contents.write(str(output))
def writeln(self,*outputs):
if not outputs:
outputs=['']
if not self.first.isSet():
self.first.set()
else:
self.contents.write('\n')
if self.parentContents:
if not self.parentFirst.isSet():
self.parentFirst.set()
else:
self.parentContents.write('\n')
for output in outputs:
self.write(output)
self.write(' ')
def __call__(self,*outputs):
self.writeln(*outputs)
def dump(self):
self.contents.flush()
self.contents.seek(0)
output = self.contents.read()
self.contents=StringIO()
self.first.clear()
return output
def pdump(self):
print self.dump()
|
normal
|
{
"blob_id": "472cdca501890d1d07c7363a48532ed3a184727c",
"index": 8516,
"step-1": "\"\"\"This is a collection of utilities for httpy and httpy applications.\n\"\"\"\n\nimport cgi\nimport linecache\nimport mimetypes\nimport os\nimport stat\nimport sys\nfrom Cookie import SimpleCookie\nfrom StringIO import StringIO\nfrom urllib import unquote\n\nfrom httpy.Response import Response\n\n\ndef uri_to_fs(config, resource_uri_path, defaults=[], raw=False):\n \"\"\"Map a requested URI to the filesystem.\n\n Takes a TransactionConfig object, a URI path, and a list of filenames which\n should be considered default resources.\n\n The URI path is taken to be rooted literally in the filesystem root (which\n could be a site root or an application root). If it points to a directory,\n we look for a default resource if any are named. If it points to a file, we\n make sure the file exists.\n\n This method can raise the following Responses:\n\n 301 Moved Permanently\n 400 Bad Request\n 403 Forbidden\n 404 Not Found\n\n If successful, we return the filesystem path to the particular resource.\n\n \"\"\"\n\n # Knit the requested URI onto the application root.\n # =================================================\n\n if config.app_fs_root == config.site_root:\n _parts = resource_uri_path.lstrip('/').split('/')\n else:\n uri_below_app = resource_uri_path[len(config.app_uri_root):]\n _parts = uri_below_app.lstrip('/').split('/')\n\n _parts.insert(0, config.app_fs_root)\n resource_fs_path = os.sep.join(_parts)\n resource_fs_path = os.path.realpath(resource_fs_path)\n\n\n if raw:\n return resource_fs_path\n\n\n if os.path.isdir(resource_fs_path):\n\n # Process the request as a directory.\n # ===================================\n\n if not resource_uri_path.endswith('/'):\n # redirect directory requests to trailing slash\n new_location = '%s/' % resource_uri_path\n response = Response(301)\n response.headers['Location'] = new_location\n log(98, \"Redirecting to trailing slash: %s\" % resource_uri_path)\n raise response\n\n log(98, \"Looking for these defaults: %s\" % str(defaults))\n default = ''\n for name in defaults:\n _path = os.path.join(resource_fs_path, name)\n if os.path.isfile(_path):\n default = _path\n break\n resource_fs_path = default\n if not default:\n log(95, \"No default resource in %s\" % resource_uri_path)\n raise Response(403)\n\n else:\n\n # Process the request as a file.\n # ==============================\n\n if not os.path.exists(resource_fs_path):\n log(95, \"Did not find %s at %s.\" % ( resource_uri_path\n , resource_fs_path\n ))\n raise Response(404)\n\n\n return resource_fs_path\n\n\n\n# Following are some parsers useful for dynamic applications.\n#\n# While httpy.Request keeps close to the HTTP layer, any dynamic application\n# will need to comprehend application-specific information encoded in the\n# Request. 
The functions below return representations of such information as\n# objects from the standard library.\n#\n# function uses returns\n# =========================================================\n# parse_query uri['query'] cgi.FieldStorage\n# parse_cookie message.get('Cookie') Cookie.SimpleCookie\n# parse_post raw_body cgi.FieldStorage\n#\n#\n# These functions are not used in httpy proper and are not unittested yet.\n\n\ndef parse_query(request):\n \"\"\"Translate request's querystring into a cgi.FieldStorage.\n \"\"\"\n\n querystring = request.uri['query']\n fp = StringIO(querystring)\n\n headers = {}\n headers['content-type'] = request.message.get('content-type')\n headers['content-length'] = request.message.get('content-length')\n\n environ = {}\n environ['REQUEST_METHOD'] = request.method\n\n boundary = request.message.get('boundary')\n\n query = cgi.FieldStorage( fp = fp\n , headers = headers\n , outerboundary = boundary\n , environ = environ\n , keep_blank_values = True\n , strict_parsing = False\n )\n\n return query\n\n\ndef parse_cookie(request):\n \"\"\"Translate request's cookie into a Cookie.SimpleCookie.\n \"\"\"\n\n raw_cookie = request.message.get('Cookie','')\n return SimpleCookie(raw_cookie)\n\n\ndef parse_post(request):\n \"\"\"Translate request's body into a cgi.FieldStorage.\n \"\"\"\n\n fp = StringIO(request.raw_body)\n\n headers = {}\n headers['content-type'] = request.message.get('content-type')\n headers['content-length'] = request.message.get('content-length')\n\n environ = {}\n environ['REQUEST_METHOD'] = request.method\n\n boundary = request.message.get('boundary')\n\n post = cgi.FieldStorage( fp = fp\n , headers = headers\n , outerboundary = boundary\n , environ = environ\n , keep_blank_values = True\n , strict_parsing = False\n )\n\n return post\n\n\n# Chad's logging util.\n\ndef log(verbosity, message):\n if int(os.environ.get(\"HTTPY_VERBOSITY\", 0)) >= verbosity:\n print \"%d %s\" % (verbosity, message)\n import sys; sys.stdout.flush()\n\n\n# Steve's logging util.\n\nfrom StringIO import StringIO\nimport new, threading\n\nclass dummy_outputer:\n def __init__(self): pass\n def write(self,*outputs): pass\n def writeln(self,*outputs): pass\n def __call__(self,*outputs): pass\n def dump(self): pass\n def pdump(self): pass\n\nclass outputer:\n \"\"\"\n This is an initial implementation of an outputer class that acts\n like print but adds a couple of features:\n 1) verbosity\n 2) buffering\n 3) output to places other than stdout\n\n Example usage:\n\n >>> out = outputer(1)\n >>> out.write('hey')\n >>> out.v2('hey','you')\n >>> out.v1('hey','you')\n >>> out.pdump()\n heyhey you\n >>> out('ack')\n >>> poo = out.dump()\n >>> poo\n 'ack '\n \"\"\"\n\n def __init__(self,verbosity=0,vlevels=5,parentFirst=None,parentContents=None):\n self.parentContents=parentContents\n self.first=threading.Event()\n self.parentFirst = parentFirst\n self.contents=StringIO()\n if not self.parentContents:\n for i in range(vlevels-1):\n v=i+1\n if v<=verbosity:\n v_outputer = outputer(parentFirst=self.first,parentContents=self.contents)\n else:\n v_outputer = dummy_outputer()\n setattr(self,'v%s'%v,v_outputer)\n\n def write(self,*outputs):\n for output in outputs:\n if self.parentContents:\n self.parentContents.write(str(output))\n self.contents.write(str(output))\n\n def writeln(self,*outputs):\n if not outputs:\n outputs=['']\n if not self.first.isSet():\n self.first.set()\n else:\n self.contents.write('\\n')\n if self.parentContents:\n if not self.parentFirst.isSet():\n 
self.parentFirst.set()\n else:\n self.parentContents.write('\\n')\n for output in outputs:\n self.write(output)\n self.write(' ')\n\n def __call__(self,*outputs):\n self.writeln(*outputs)\n\n def dump(self):\n self.contents.flush()\n self.contents.seek(0)\n output = self.contents.read()\n self.contents=StringIO()\n self.first.clear()\n return output\n\n def pdump(self):\n print self.dump()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
decoded = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "=", "."]
encoded = ["49", "48", "4B", "4A", "4D", "4C", "4F", "4E", "41", "40", "38", "3B", "3A", "3D", "3C", "3F", "3E", "31", "30", "33", "32", "35", "34", "37", "36", "29", "28", "2B", "2A", "2D", "2C", "2F", "2E", "21", "20", "23", "18", "1B", "1A", "1D", "1C", "1F", "1E", "11", "10", "13", "12", "15", "14", "17", "16", "09", "08", "0B", "0A", "0D", "0C", "0F", "0E", "01", "00", "03", "44", "57"]
def decode(value) :
out_value = ""
char = [value[i:i+2] for i in range(0, len(value), 2)]
for i in range(0, len(char)) :
out_value += decoded[encoded.index(char[i])]
return out_value
def encode(char) :
out_value = ""
char = [value[i:i+1] for i in range(0, len(value))]
for i in range(0, len(char)) :
out_value += encoded[decoded.index(char[i])]
return out_value
if __name__ == "__main__" :
print("By default the program will open UserCustom.ini which should be in the directory as the program.")
user_input = str(input("Would you like to encode or decode UserCustom.ini ? (encode/decode) "))
const = "+CVars="
config = open("UserCustom.ini" , "r")
out_file = open("UserCustom.ini.out", "w")
out_value = ""
lines = config.readlines()
for i in range(0, len(lines)) :
if lines[i].startswith(const) :
value = lines[i].split(const)[-1].split("\n")[0]
if user_input.lower() == "encode" or user_input.lower() == "e" :
out_value = encode(value)
elif user_input.lower() == "decode" or user_input.lower() == "d" :
out_value = decode(value)
out_file.write(const + out_value + "\n")
else :
out_file.write(lines[i])
out_file.close()
config.close()
pass
|
normal
|
{
"blob_id": "23236cd8262eb414666db88215c01d973abf1d97",
"index": 1247,
"step-1": "<mask token>\n\n\ndef decode(value):\n out_value = ''\n char = [value[i:i + 2] for i in range(0, len(value), 2)]\n for i in range(0, len(char)):\n out_value += decoded[encoded.index(char[i])]\n return out_value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef decode(value):\n out_value = ''\n char = [value[i:i + 2] for i in range(0, len(value), 2)]\n for i in range(0, len(char)):\n out_value += decoded[encoded.index(char[i])]\n return out_value\n\n\ndef encode(char):\n out_value = ''\n char = [value[i:i + 1] for i in range(0, len(value))]\n for i in range(0, len(char)):\n out_value += encoded[decoded.index(char[i])]\n return out_value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef decode(value):\n out_value = ''\n char = [value[i:i + 2] for i in range(0, len(value), 2)]\n for i in range(0, len(char)):\n out_value += decoded[encoded.index(char[i])]\n return out_value\n\n\ndef encode(char):\n out_value = ''\n char = [value[i:i + 1] for i in range(0, len(value))]\n for i in range(0, len(char)):\n out_value += encoded[decoded.index(char[i])]\n return out_value\n\n\nif __name__ == '__main__':\n print(\n 'By default the program will open UserCustom.ini which should be in the directory as the program.'\n )\n user_input = str(input(\n 'Would you like to encode or decode UserCustom.ini ? (encode/decode) ')\n )\n const = '+CVars='\n config = open('UserCustom.ini', 'r')\n out_file = open('UserCustom.ini.out', 'w')\n out_value = ''\n lines = config.readlines()\n for i in range(0, len(lines)):\n if lines[i].startswith(const):\n value = lines[i].split(const)[-1].split('\\n')[0]\n if user_input.lower() == 'encode' or user_input.lower() == 'e':\n out_value = encode(value)\n elif user_input.lower() == 'decode' or user_input.lower() == 'd':\n out_value = decode(value)\n out_file.write(const + out_value + '\\n')\n else:\n out_file.write(lines[i])\n out_file.close()\n config.close()\n pass\n",
"step-4": "decoded = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',\n 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',\n 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e',\n 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',\n 't', 'u', 'v', 'w', 'x', 'y', 'z', '=', '.']\nencoded = ['49', '48', '4B', '4A', '4D', '4C', '4F', '4E', '41', '40', '38',\n '3B', '3A', '3D', '3C', '3F', '3E', '31', '30', '33', '32', '35', '34',\n '37', '36', '29', '28', '2B', '2A', '2D', '2C', '2F', '2E', '21', '20',\n '23', '18', '1B', '1A', '1D', '1C', '1F', '1E', '11', '10', '13', '12',\n '15', '14', '17', '16', '09', '08', '0B', '0A', '0D', '0C', '0F', '0E',\n '01', '00', '03', '44', '57']\n\n\ndef decode(value):\n out_value = ''\n char = [value[i:i + 2] for i in range(0, len(value), 2)]\n for i in range(0, len(char)):\n out_value += decoded[encoded.index(char[i])]\n return out_value\n\n\ndef encode(char):\n out_value = ''\n char = [value[i:i + 1] for i in range(0, len(value))]\n for i in range(0, len(char)):\n out_value += encoded[decoded.index(char[i])]\n return out_value\n\n\nif __name__ == '__main__':\n print(\n 'By default the program will open UserCustom.ini which should be in the directory as the program.'\n )\n user_input = str(input(\n 'Would you like to encode or decode UserCustom.ini ? (encode/decode) ')\n )\n const = '+CVars='\n config = open('UserCustom.ini', 'r')\n out_file = open('UserCustom.ini.out', 'w')\n out_value = ''\n lines = config.readlines()\n for i in range(0, len(lines)):\n if lines[i].startswith(const):\n value = lines[i].split(const)[-1].split('\\n')[0]\n if user_input.lower() == 'encode' or user_input.lower() == 'e':\n out_value = encode(value)\n elif user_input.lower() == 'decode' or user_input.lower() == 'd':\n out_value = decode(value)\n out_file.write(const + out_value + '\\n')\n else:\n out_file.write(lines[i])\n out_file.close()\n config.close()\n pass\n",
"step-5": "decoded = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\", \"=\", \".\"]\nencoded = [\"49\", \"48\", \"4B\", \"4A\", \"4D\", \"4C\", \"4F\", \"4E\", \"41\", \"40\", \"38\", \"3B\", \"3A\", \"3D\", \"3C\", \"3F\", \"3E\", \"31\", \"30\", \"33\", \"32\", \"35\", \"34\", \"37\", \"36\", \"29\", \"28\", \"2B\", \"2A\", \"2D\", \"2C\", \"2F\", \"2E\", \"21\", \"20\", \"23\", \"18\", \"1B\", \"1A\", \"1D\", \"1C\", \"1F\", \"1E\", \"11\", \"10\", \"13\", \"12\", \"15\", \"14\", \"17\", \"16\", \"09\", \"08\", \"0B\", \"0A\", \"0D\", \"0C\", \"0F\", \"0E\", \"01\", \"00\", \"03\", \"44\", \"57\"]\n\ndef decode(value) : \n out_value = \"\"\n char = [value[i:i+2] for i in range(0, len(value), 2)]\n for i in range(0, len(char)) :\n out_value += decoded[encoded.index(char[i])]\n return out_value\n\ndef encode(char) : \n out_value = \"\"\n char = [value[i:i+1] for i in range(0, len(value))]\n for i in range(0, len(char)) :\n out_value += encoded[decoded.index(char[i])]\n return out_value\n\nif __name__ == \"__main__\" :\n print(\"By default the program will open UserCustom.ini which should be in the directory as the program.\")\n user_input = str(input(\"Would you like to encode or decode UserCustom.ini ? (encode/decode) \"))\n const = \"+CVars=\"\n config = open(\"UserCustom.ini\" , \"r\")\n out_file = open(\"UserCustom.ini.out\", \"w\")\n out_value = \"\"\n lines = config.readlines()\n for i in range(0, len(lines)) :\n if lines[i].startswith(const) :\n value = lines[i].split(const)[-1].split(\"\\n\")[0]\n if user_input.lower() == \"encode\" or user_input.lower() == \"e\" :\n out_value = encode(value)\n elif user_input.lower() == \"decode\" or user_input.lower() == \"d\" :\n out_value = decode(value)\n out_file.write(const + out_value + \"\\n\")\n else : \n out_file.write(lines[i]) \n out_file.close()\n config.close()\n pass",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
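
A minimal sketch of the same fixed-table substitution as in the row above, using dictionaries built once instead of a linear `.index()` scan per character (the two-entry tables below only stand in for the 64-entry `decoded`/`encoded` lists from the row):

decoded = ["0", "1"]        # placeholder for the full 64-entry table in the row
encoded = ["49", "48"]      # matching two-character hex tokens

enc_map = dict(zip(decoded, encoded))   # char -> token
dec_map = dict(zip(encoded, decoded))   # token -> char

def encode(value):
    # one dictionary lookup per input character
    return "".join(enc_map[ch] for ch in value)

def decode(value):
    # consume the encoded string two characters at a time
    return "".join(dec_map[value[i:i + 2]] for i in range(0, len(value), 2))

assert decode(encode("0110")) == "0110"
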
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_runs_counts_by_match():
ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')
df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])
df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()
df = df2.pivot(index='match_code', columns='runs')
df = df.fillna(0)
df = df.astype('int')
return df
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_runs_counts_by_match():
ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')
df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])
df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()
df = df2.pivot(index='match_code', columns='runs')
df = df.fillna(0)
df = df.astype('int')
return df
get_runs_counts_by_match()
<|reserved_special_token_1|>
import pandas as pd
from greyatomlib.pandas_project.q01_read_csv_data_to_df.build import read_csv_data_to_df
def get_runs_counts_by_match():
ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')
df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])
df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()
df = df2.pivot(index='match_code', columns='runs')
df = df.fillna(0)
df = df.astype('int')
return df
get_runs_counts_by_match()
<|reserved_special_token_1|>
import pandas as pd
from greyatomlib.pandas_project.q01_read_csv_data_to_df.build import read_csv_data_to_df
def get_runs_counts_by_match():
ipl_df = read_csv_data_to_df("data/ipl_dataset.csv")
df1 = pd.DataFrame(ipl_df[['match_code','runs','venue']])
df2 = df1.groupby(['match_code','runs'], as_index=False).count()
df = df2.pivot(index='match_code',columns='runs')
df = df.fillna(0)
df = df.astype('int')
return df
get_runs_counts_by_match()
|
flexible
|
{
"blob_id": "4f06d87ec79c20206ff45ba72ab77844076be553",
"index": 9707,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_runs_counts_by_match():\n ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')\n df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])\n df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()\n df = df2.pivot(index='match_code', columns='runs')\n df = df.fillna(0)\n df = df.astype('int')\n return df\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_runs_counts_by_match():\n ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')\n df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])\n df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()\n df = df2.pivot(index='match_code', columns='runs')\n df = df.fillna(0)\n df = df.astype('int')\n return df\n\n\nget_runs_counts_by_match()\n",
"step-4": "import pandas as pd\nfrom greyatomlib.pandas_project.q01_read_csv_data_to_df.build import read_csv_data_to_df\n\n\ndef get_runs_counts_by_match():\n ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')\n df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])\n df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()\n df = df2.pivot(index='match_code', columns='runs')\n df = df.fillna(0)\n df = df.astype('int')\n return df\n\n\nget_runs_counts_by_match()\n",
"step-5": "\nimport pandas as pd\nfrom greyatomlib.pandas_project.q01_read_csv_data_to_df.build import read_csv_data_to_df\n\ndef get_runs_counts_by_match():\n ipl_df = read_csv_data_to_df(\"data/ipl_dataset.csv\")\n df1 = pd.DataFrame(ipl_df[['match_code','runs','venue']])\n df2 = df1.groupby(['match_code','runs'], as_index=False).count()\n df = df2.pivot(index='match_code',columns='runs')\n df = df.fillna(0)\n df = df.astype('int')\n return df\n\nget_runs_counts_by_match()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
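
A minimal sketch of the same match_code-by-runs count table built with `pd.crosstab`, which replaces the groupby + pivot + fillna chain in the row above (the small frame below merely stands in for the ipl_dataset.csv columns):

import pandas as pd

# stand-in for read_csv_data_to_df("data/ipl_dataset.csv")[["match_code", "runs"]]
df = pd.DataFrame({
    "match_code": [1, 1, 1, 2, 2],
    "runs":       [0, 1, 1, 4, 6],
})

# crosstab counts each (match_code, runs) pair and fills missing pairs with 0
counts = pd.crosstab(df["match_code"], df["runs"]).astype("int")
print(counts)
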
from bisect import bisect_left as bisect
while True:
xp, yp = set(), set()
veneer = []
W, H = map(int, input().split())
if not W:
break
N = int(input())
for i in range(N):
x1, y1, x2, y2 = map(int, input().split())
veneer.append((x1, y1, x2, y2))
xp.add(x1)
xp.add(x2)
yp.add(y1)
yp.add(y2)
xp = list(xp)
yp = list(yp)
wa = [[0 for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]
print()
for v in veneer:
xi1 = bisect(xp, v[0])
xi2 = bisect(xp, v[1])
yi1 = bisect(yp, v[2])
yi2 = bisect(yp, v[3])
print(xi1, yi1, xi2, yi2)
wa[yi1][xi1] += 1
wa[yi2 + 1][xi1] -=1
wa[yi1][xi2 + 1] -=1
mem = [[0 for x in xp] for y in yp]
for y, _ in enumerate(yp):
for x, _ in enumerate(xp):
mem[y][x] += wa[y][x]
if y > 0:
mem[y][x] += mem[y - 1][x]
if x > 0:
mem[y][x] += mem[y][x - 1]
print(wa[y])
|
normal
|
{
"blob_id": "e0fbb5ad6d822230865e34c1216b355f700e5cec",
"index": 7822,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n xp, yp = set(), set()\n veneer = []\n W, H = map(int, input().split())\n if not W:\n break\n N = int(input())\n for i in range(N):\n x1, y1, x2, y2 = map(int, input().split())\n veneer.append((x1, y1, x2, y2))\n xp.add(x1)\n xp.add(x2)\n yp.add(y1)\n yp.add(y2)\n xp = list(xp)\n yp = list(yp)\n wa = [[(0) for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]\n print()\n for v in veneer:\n xi1 = bisect(xp, v[0])\n xi2 = bisect(xp, v[1])\n yi1 = bisect(yp, v[2])\n yi2 = bisect(yp, v[3])\n print(xi1, yi1, xi2, yi2)\n wa[yi1][xi1] += 1\n wa[yi2 + 1][xi1] -= 1\n wa[yi1][xi2 + 1] -= 1\n mem = [[(0) for x in xp] for y in yp]\n for y, _ in enumerate(yp):\n for x, _ in enumerate(xp):\n mem[y][x] += wa[y][x]\n if y > 0:\n mem[y][x] += mem[y - 1][x]\n if x > 0:\n mem[y][x] += mem[y][x - 1]\n print(wa[y])\n",
"step-3": "from bisect import bisect_left as bisect\nwhile True:\n xp, yp = set(), set()\n veneer = []\n W, H = map(int, input().split())\n if not W:\n break\n N = int(input())\n for i in range(N):\n x1, y1, x2, y2 = map(int, input().split())\n veneer.append((x1, y1, x2, y2))\n xp.add(x1)\n xp.add(x2)\n yp.add(y1)\n yp.add(y2)\n xp = list(xp)\n yp = list(yp)\n wa = [[(0) for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]\n print()\n for v in veneer:\n xi1 = bisect(xp, v[0])\n xi2 = bisect(xp, v[1])\n yi1 = bisect(yp, v[2])\n yi2 = bisect(yp, v[3])\n print(xi1, yi1, xi2, yi2)\n wa[yi1][xi1] += 1\n wa[yi2 + 1][xi1] -= 1\n wa[yi1][xi2 + 1] -= 1\n mem = [[(0) for x in xp] for y in yp]\n for y, _ in enumerate(yp):\n for x, _ in enumerate(xp):\n mem[y][x] += wa[y][x]\n if y > 0:\n mem[y][x] += mem[y - 1][x]\n if x > 0:\n mem[y][x] += mem[y][x - 1]\n print(wa[y])\n",
"step-4": "from bisect import bisect_left as bisect\nwhile True:\n xp, yp = set(), set()\n veneer = []\n W, H = map(int, input().split())\n if not W:\n break\n N = int(input())\n for i in range(N):\n x1, y1, x2, y2 = map(int, input().split())\n veneer.append((x1, y1, x2, y2))\n xp.add(x1)\n xp.add(x2)\n yp.add(y1)\n yp.add(y2)\n xp = list(xp)\n yp = list(yp)\n wa = [[0 for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]\n print()\n for v in veneer:\n xi1 = bisect(xp, v[0])\n xi2 = bisect(xp, v[1])\n yi1 = bisect(yp, v[2])\n yi2 = bisect(yp, v[3])\n print(xi1, yi1, xi2, yi2)\n wa[yi1][xi1] += 1\n wa[yi2 + 1][xi1] -=1\n wa[yi1][xi2 + 1] -=1\n mem = [[0 for x in xp] for y in yp]\n for y, _ in enumerate(yp):\n for x, _ in enumerate(xp):\n mem[y][x] += wa[y][x]\n if y > 0:\n mem[y][x] += mem[y - 1][x]\n if x > 0:\n mem[y][x] += mem[y][x - 1]\n print(wa[y])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
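
A self-contained sketch of the 2D difference-array ("imos") accumulation that the row above appears to be building on top of bisect-based coordinate compression; here it runs on a plain grid and updates all four corners per rectangle (the row only adjusts three of them):

# Rectangles are half-open [x1, x2) x [y1, y2); names are illustrative only.
def cover_count(W, H, rects):
    diff = [[0] * (W + 1) for _ in range(H + 1)]
    for x1, y1, x2, y2 in rects:
        diff[y1][x1] += 1
        diff[y1][x2] -= 1
        diff[y2][x1] -= 1
        diff[y2][x2] += 1          # fourth corner restores the double subtraction
    # prefix sums along x, then along y, turn the diff grid into coverage counts
    grid = [[0] * W for _ in range(H)]
    for y in range(H):
        run = 0
        for x in range(W):
            run += diff[y][x]
            grid[y][x] = run
    for x in range(W):
        run = 0
        for y in range(H):
            run += grid[y][x]
            grid[y][x] = run
    return grid

# two overlapping 2x2 squares on a 4x4 grid; the shared cell is covered twice
print(cover_count(4, 4, [(0, 0, 2, 2), (1, 1, 3, 3)]))
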
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('SYL_2整型数组_12 合并排序数组')
|
flexible
|
{
"blob_id": "571636be9d213d19bddfd1d04688bc0955c9eae5",
"index": 4427,
"step-1": "<mask token>\n",
"step-2": "print('SYL_2整型数组_12 合并排序数组')\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
"""
Platformer Game
"""
import arcade
import os
from Toad_arcade import Toad
# Constants
SCREEN_WIDTH = 1920
SCREEN_HEIGHT = 1080
SCREEN_TITLE = "PyToads - Battletoads reimplementation"
# Constants used to scale our sprites from their original size
CHARACTER_SCALING = 1
TILE_SCALING = 0.5
COIN_SCALING = 0.5
MOVEMENT_SPEED = 5
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
""" Set up the game and initialize the variables. """
# Sprite lists
self.player_list = None
# Set up the player
self.score = 0
self.player = None
def setup(self):
self.player_list = arcade.SpriteList()
# Set up the player
self.score = 0
self.player = Toad()
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
#self.player.scale = 0.8
self.player_list.append(self.player)
# Set the background color
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw all the sprites.
self.player_list.draw()
# Put the text on the screen.
output = f"Score: {self.score}"
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
self.player_list.update()
self.player_list.update_animation()
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "28d8f9d9b39c40c43a362e57a7907c0a38a6bd05",
"index": 748,
"step-1": "<mask token>\n\n\nclass MyGame(arcade.Window):\n <mask token>\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n \"\"\" Set up the game and initialize the variables. \"\"\"\n self.player_list = None\n self.score = 0\n self.player = None\n <mask token>\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n arcade.start_render()\n self.player_list.draw()\n output = f'Score: {self.score}'\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n self.player_list.update()\n self.player_list.update_animation()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyGame(arcade.Window):\n <mask token>\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n \"\"\" Set up the game and initialize the variables. \"\"\"\n self.player_list = None\n self.score = 0\n self.player = None\n\n def setup(self):\n self.player_list = arcade.SpriteList()\n self.score = 0\n self.player = Toad()\n self.player.center_x = SCREEN_WIDTH // 2\n self.player.center_y = SCREEN_HEIGHT // 2\n self.player_list.append(self.player)\n arcade.set_background_color(arcade.color.AMAZON)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n arcade.start_render()\n self.player_list.draw()\n output = f'Score: {self.score}'\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n self.player_list.update()\n self.player_list.update_animation()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyGame(arcade.Window):\n \"\"\" Main application class. \"\"\"\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n \"\"\" Set up the game and initialize the variables. \"\"\"\n self.player_list = None\n self.score = 0\n self.player = None\n\n def setup(self):\n self.player_list = arcade.SpriteList()\n self.score = 0\n self.player = Toad()\n self.player.center_x = SCREEN_WIDTH // 2\n self.player.center_y = SCREEN_HEIGHT // 2\n self.player_list.append(self.player)\n arcade.set_background_color(arcade.color.AMAZON)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n arcade.start_render()\n self.player_list.draw()\n output = f'Score: {self.score}'\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n self.player_list.update()\n self.player_list.update_animation()\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n window.setup()\n arcade.run()\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport arcade\nimport os\nfrom Toad_arcade import Toad\nSCREEN_WIDTH = 1920\nSCREEN_HEIGHT = 1080\nSCREEN_TITLE = 'PyToads - Battletoads reimplementation'\nCHARACTER_SCALING = 1\nTILE_SCALING = 0.5\nCOIN_SCALING = 0.5\nMOVEMENT_SPEED = 5\n\n\nclass MyGame(arcade.Window):\n \"\"\" Main application class. \"\"\"\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n \"\"\" Set up the game and initialize the variables. \"\"\"\n self.player_list = None\n self.score = 0\n self.player = None\n\n def setup(self):\n self.player_list = arcade.SpriteList()\n self.score = 0\n self.player = Toad()\n self.player.center_x = SCREEN_WIDTH // 2\n self.player.center_y = SCREEN_HEIGHT // 2\n self.player_list.append(self.player)\n arcade.set_background_color(arcade.color.AMAZON)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n arcade.start_render()\n self.player_list.draw()\n output = f'Score: {self.score}'\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n self.player_list.update()\n self.player_list.update_animation()\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n window.setup()\n arcade.run()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nPlatformer Game\n\"\"\"\nimport arcade\nimport os\nfrom Toad_arcade import Toad\n# Constants\nSCREEN_WIDTH = 1920\nSCREEN_HEIGHT = 1080\nSCREEN_TITLE = \"PyToads - Battletoads reimplementation\"\n\n# Constants used to scale our sprites from their original size\nCHARACTER_SCALING = 1\nTILE_SCALING = 0.5\nCOIN_SCALING = 0.5\nMOVEMENT_SPEED = 5\n\nclass MyGame(arcade.Window):\n \"\"\" Main application class. \"\"\"\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n\n # Set the working directory (where we expect to find files) to the same\n # directory this .py file is in. You can leave this out of your own\n # code, but it is needed to easily run the examples using \"python -m\"\n # as mentioned at the top of this program.\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n\n \"\"\" Set up the game and initialize the variables. \"\"\"\n\n # Sprite lists\n self.player_list = None\n\n # Set up the player\n self.score = 0\n self.player = None\n\n def setup(self):\n self.player_list = arcade.SpriteList()\n # Set up the player\n self.score = 0\n self.player = Toad()\n\n self.player.center_x = SCREEN_WIDTH // 2\n self.player.center_y = SCREEN_HEIGHT // 2\n #self.player.scale = 0.8\n\n self.player_list.append(self.player)\n # Set the background color\n arcade.set_background_color(arcade.color.AMAZON)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n # This command has to happen before we start drawing\n arcade.start_render()\n\n # Draw all the sprites.\n self.player_list.draw()\n\n # Put the text on the screen.\n output = f\"Score: {self.score}\"\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n\n self.player_list.update()\n self.player_list.update_animation()\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n window.setup()\n arcade.run()\n\n\nif __name__ == \"__main__\":\n main()",
"step-ids": [
6,
7,
9,
12,
13
]
}
|
[
6,
7,
9,
12,
13
] |
"""Resolwe collection serializer."""
import logging
from rest_framework import serializers
from resolwe.flow.models import Collection, Data, DescriptorSchema
from resolwe.rest.fields import ProjectableJSONField
from .base import ResolweBaseSerializer
from .descriptor import DescriptorSchemaSerializer
from .fields import DictRelatedField
logger = logging.getLogger(__name__)
class BaseCollectionSerializer(ResolweBaseSerializer):
"""Base serializer for Collection objects."""
settings = ProjectableJSONField(required=False)
descriptor = ProjectableJSONField(required=False)
descriptor_schema = DictRelatedField(
queryset=DescriptorSchema.objects.all(),
serializer=DescriptorSchemaSerializer,
allow_null=True,
required=False,
)
data_count = serializers.SerializerMethodField(required=False)
status = serializers.SerializerMethodField(required=False)
def get_data_count(self, collection):
"""Return number of data objects on the collection."""
# Use 'data_count' attribute when available. It is created in the
# BaseCollectionViewSet class.
return (
collection.data_count
if hasattr(collection, "data_count")
else collection.data.count()
)
def get_status(self, collection):
"""Return status of the collection based on the status of data objects.
When collection contains no data objects None is returned.
"""
status_order = [
Data.STATUS_ERROR,
Data.STATUS_UPLOADING,
Data.STATUS_PROCESSING,
Data.STATUS_PREPARING,
Data.STATUS_WAITING,
Data.STATUS_RESOLVING,
Data.STATUS_DONE,
]
# Use 'data_statuses' attribute when available. It is created in the
# BaseCollectionViewSet class. It contains all the distinct statuses of the
# data objects in the collection.
status_set = (
set(collection.data_statuses)
if hasattr(collection, "data_statuses")
else collection.data.values_list("status", flat=True).distinct()
)
if not status_set:
return None
for status in status_order:
if status in status_set:
return status
logger.warning(
"Could not determine the status of a collection.",
extra={"collection": collection.__dict__},
)
return None
class Meta:
"""CollectionSerializer Meta options."""
model = Collection
read_only_fields = (
"created",
"descriptor_dirty",
"duplicated",
"id",
"modified",
"data_count",
"status",
)
update_protected_fields = ("contributor",)
fields = (
read_only_fields
+ update_protected_fields
+ (
"description",
"descriptor",
"descriptor_schema",
"name",
"settings",
"slug",
"tags",
)
)
class CollectionSerializer(BaseCollectionSerializer):
"""Serializer for Collection objects."""
entity_count = serializers.SerializerMethodField(required=False)
def get_entity_count(self, collection):
"""Return number of entities on the collection."""
# Use 'entity_count' attribute when available. It is created in the
# BaseCollectionViewSet class.
return (
collection.entity_count
if hasattr(collection, "entity_count")
else collection.entity_set.count()
)
class Meta(BaseCollectionSerializer.Meta):
"""CollectionSerializer Meta options."""
read_only_fields = BaseCollectionSerializer.Meta.read_only_fields + (
"entity_count",
)
fields = BaseCollectionSerializer.Meta.fields + ("entity_count",)
|
normal
|
{
"blob_id": "d6f8ec0fd8be0fa7019a84af47d08ab8b5b32d92",
"index": 1449,
"step-1": "<mask token>\n\n\nclass BaseCollectionSerializer(ResolweBaseSerializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_status(self, collection):\n \"\"\"Return status of the collection based on the status of data objects.\n\n When collection contains no data objects None is returned.\n \"\"\"\n status_order = [Data.STATUS_ERROR, Data.STATUS_UPLOADING, Data.\n STATUS_PROCESSING, Data.STATUS_PREPARING, Data.STATUS_WAITING,\n Data.STATUS_RESOLVING, Data.STATUS_DONE]\n status_set = set(collection.data_statuses) if hasattr(collection,\n 'data_statuses') else collection.data.values_list('status',\n flat=True).distinct()\n if not status_set:\n return None\n for status in status_order:\n if status in status_set:\n return status\n logger.warning('Could not determine the status of a collection.',\n extra={'collection': collection.__dict__})\n return None\n\n\n class Meta:\n \"\"\"CollectionSerializer Meta options.\"\"\"\n model = Collection\n read_only_fields = ('created', 'descriptor_dirty', 'duplicated',\n 'id', 'modified', 'data_count', 'status')\n update_protected_fields = 'contributor',\n fields = read_only_fields + update_protected_fields + ('description',\n 'descriptor', 'descriptor_schema', 'name', 'settings', 'slug',\n 'tags')\n\n\nclass CollectionSerializer(BaseCollectionSerializer):\n \"\"\"Serializer for Collection objects.\"\"\"\n entity_count = serializers.SerializerMethodField(required=False)\n\n def get_entity_count(self, collection):\n \"\"\"Return number of entities on the collection.\"\"\"\n return collection.entity_count if hasattr(collection, 'entity_count'\n ) else collection.entity_set.count()\n\n\n class Meta(BaseCollectionSerializer.Meta):\n \"\"\"CollectionSerializer Meta options.\"\"\"\n read_only_fields = BaseCollectionSerializer.Meta.read_only_fields + (\n 'entity_count',)\n fields = BaseCollectionSerializer.Meta.fields + ('entity_count',)\n",
"step-2": "<mask token>\n\n\nclass BaseCollectionSerializer(ResolweBaseSerializer):\n <mask token>\n settings = ProjectableJSONField(required=False)\n descriptor = ProjectableJSONField(required=False)\n descriptor_schema = DictRelatedField(queryset=DescriptorSchema.objects.\n all(), serializer=DescriptorSchemaSerializer, allow_null=True,\n required=False)\n data_count = serializers.SerializerMethodField(required=False)\n status = serializers.SerializerMethodField(required=False)\n\n def get_data_count(self, collection):\n \"\"\"Return number of data objects on the collection.\"\"\"\n return collection.data_count if hasattr(collection, 'data_count'\n ) else collection.data.count()\n\n def get_status(self, collection):\n \"\"\"Return status of the collection based on the status of data objects.\n\n When collection contains no data objects None is returned.\n \"\"\"\n status_order = [Data.STATUS_ERROR, Data.STATUS_UPLOADING, Data.\n STATUS_PROCESSING, Data.STATUS_PREPARING, Data.STATUS_WAITING,\n Data.STATUS_RESOLVING, Data.STATUS_DONE]\n status_set = set(collection.data_statuses) if hasattr(collection,\n 'data_statuses') else collection.data.values_list('status',\n flat=True).distinct()\n if not status_set:\n return None\n for status in status_order:\n if status in status_set:\n return status\n logger.warning('Could not determine the status of a collection.',\n extra={'collection': collection.__dict__})\n return None\n\n\n class Meta:\n \"\"\"CollectionSerializer Meta options.\"\"\"\n model = Collection\n read_only_fields = ('created', 'descriptor_dirty', 'duplicated',\n 'id', 'modified', 'data_count', 'status')\n update_protected_fields = 'contributor',\n fields = read_only_fields + update_protected_fields + ('description',\n 'descriptor', 'descriptor_schema', 'name', 'settings', 'slug',\n 'tags')\n\n\nclass CollectionSerializer(BaseCollectionSerializer):\n \"\"\"Serializer for Collection objects.\"\"\"\n entity_count = serializers.SerializerMethodField(required=False)\n\n def get_entity_count(self, collection):\n \"\"\"Return number of entities on the collection.\"\"\"\n return collection.entity_count if hasattr(collection, 'entity_count'\n ) else collection.entity_set.count()\n\n\n class Meta(BaseCollectionSerializer.Meta):\n \"\"\"CollectionSerializer Meta options.\"\"\"\n read_only_fields = BaseCollectionSerializer.Meta.read_only_fields + (\n 'entity_count',)\n fields = BaseCollectionSerializer.Meta.fields + ('entity_count',)\n",
"step-3": "<mask token>\n\n\nclass BaseCollectionSerializer(ResolweBaseSerializer):\n \"\"\"Base serializer for Collection objects.\"\"\"\n settings = ProjectableJSONField(required=False)\n descriptor = ProjectableJSONField(required=False)\n descriptor_schema = DictRelatedField(queryset=DescriptorSchema.objects.\n all(), serializer=DescriptorSchemaSerializer, allow_null=True,\n required=False)\n data_count = serializers.SerializerMethodField(required=False)\n status = serializers.SerializerMethodField(required=False)\n\n def get_data_count(self, collection):\n \"\"\"Return number of data objects on the collection.\"\"\"\n return collection.data_count if hasattr(collection, 'data_count'\n ) else collection.data.count()\n\n def get_status(self, collection):\n \"\"\"Return status of the collection based on the status of data objects.\n\n When collection contains no data objects None is returned.\n \"\"\"\n status_order = [Data.STATUS_ERROR, Data.STATUS_UPLOADING, Data.\n STATUS_PROCESSING, Data.STATUS_PREPARING, Data.STATUS_WAITING,\n Data.STATUS_RESOLVING, Data.STATUS_DONE]\n status_set = set(collection.data_statuses) if hasattr(collection,\n 'data_statuses') else collection.data.values_list('status',\n flat=True).distinct()\n if not status_set:\n return None\n for status in status_order:\n if status in status_set:\n return status\n logger.warning('Could not determine the status of a collection.',\n extra={'collection': collection.__dict__})\n return None\n\n\n class Meta:\n \"\"\"CollectionSerializer Meta options.\"\"\"\n model = Collection\n read_only_fields = ('created', 'descriptor_dirty', 'duplicated',\n 'id', 'modified', 'data_count', 'status')\n update_protected_fields = 'contributor',\n fields = read_only_fields + update_protected_fields + ('description',\n 'descriptor', 'descriptor_schema', 'name', 'settings', 'slug',\n 'tags')\n\n\nclass CollectionSerializer(BaseCollectionSerializer):\n \"\"\"Serializer for Collection objects.\"\"\"\n entity_count = serializers.SerializerMethodField(required=False)\n\n def get_entity_count(self, collection):\n \"\"\"Return number of entities on the collection.\"\"\"\n return collection.entity_count if hasattr(collection, 'entity_count'\n ) else collection.entity_set.count()\n\n\n class Meta(BaseCollectionSerializer.Meta):\n \"\"\"CollectionSerializer Meta options.\"\"\"\n read_only_fields = BaseCollectionSerializer.Meta.read_only_fields + (\n 'entity_count',)\n fields = BaseCollectionSerializer.Meta.fields + ('entity_count',)\n",
"step-4": "<mask token>\nimport logging\nfrom rest_framework import serializers\nfrom resolwe.flow.models import Collection, Data, DescriptorSchema\nfrom resolwe.rest.fields import ProjectableJSONField\nfrom .base import ResolweBaseSerializer\nfrom .descriptor import DescriptorSchemaSerializer\nfrom .fields import DictRelatedField\nlogger = logging.getLogger(__name__)\n\n\nclass BaseCollectionSerializer(ResolweBaseSerializer):\n \"\"\"Base serializer for Collection objects.\"\"\"\n settings = ProjectableJSONField(required=False)\n descriptor = ProjectableJSONField(required=False)\n descriptor_schema = DictRelatedField(queryset=DescriptorSchema.objects.\n all(), serializer=DescriptorSchemaSerializer, allow_null=True,\n required=False)\n data_count = serializers.SerializerMethodField(required=False)\n status = serializers.SerializerMethodField(required=False)\n\n def get_data_count(self, collection):\n \"\"\"Return number of data objects on the collection.\"\"\"\n return collection.data_count if hasattr(collection, 'data_count'\n ) else collection.data.count()\n\n def get_status(self, collection):\n \"\"\"Return status of the collection based on the status of data objects.\n\n When collection contains no data objects None is returned.\n \"\"\"\n status_order = [Data.STATUS_ERROR, Data.STATUS_UPLOADING, Data.\n STATUS_PROCESSING, Data.STATUS_PREPARING, Data.STATUS_WAITING,\n Data.STATUS_RESOLVING, Data.STATUS_DONE]\n status_set = set(collection.data_statuses) if hasattr(collection,\n 'data_statuses') else collection.data.values_list('status',\n flat=True).distinct()\n if not status_set:\n return None\n for status in status_order:\n if status in status_set:\n return status\n logger.warning('Could not determine the status of a collection.',\n extra={'collection': collection.__dict__})\n return None\n\n\n class Meta:\n \"\"\"CollectionSerializer Meta options.\"\"\"\n model = Collection\n read_only_fields = ('created', 'descriptor_dirty', 'duplicated',\n 'id', 'modified', 'data_count', 'status')\n update_protected_fields = 'contributor',\n fields = read_only_fields + update_protected_fields + ('description',\n 'descriptor', 'descriptor_schema', 'name', 'settings', 'slug',\n 'tags')\n\n\nclass CollectionSerializer(BaseCollectionSerializer):\n \"\"\"Serializer for Collection objects.\"\"\"\n entity_count = serializers.SerializerMethodField(required=False)\n\n def get_entity_count(self, collection):\n \"\"\"Return number of entities on the collection.\"\"\"\n return collection.entity_count if hasattr(collection, 'entity_count'\n ) else collection.entity_set.count()\n\n\n class Meta(BaseCollectionSerializer.Meta):\n \"\"\"CollectionSerializer Meta options.\"\"\"\n read_only_fields = BaseCollectionSerializer.Meta.read_only_fields + (\n 'entity_count',)\n fields = BaseCollectionSerializer.Meta.fields + ('entity_count',)\n",
"step-5": "\"\"\"Resolwe collection serializer.\"\"\"\nimport logging\n\nfrom rest_framework import serializers\n\nfrom resolwe.flow.models import Collection, Data, DescriptorSchema\nfrom resolwe.rest.fields import ProjectableJSONField\n\nfrom .base import ResolweBaseSerializer\nfrom .descriptor import DescriptorSchemaSerializer\nfrom .fields import DictRelatedField\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseCollectionSerializer(ResolweBaseSerializer):\n \"\"\"Base serializer for Collection objects.\"\"\"\n\n settings = ProjectableJSONField(required=False)\n descriptor = ProjectableJSONField(required=False)\n descriptor_schema = DictRelatedField(\n queryset=DescriptorSchema.objects.all(),\n serializer=DescriptorSchemaSerializer,\n allow_null=True,\n required=False,\n )\n data_count = serializers.SerializerMethodField(required=False)\n status = serializers.SerializerMethodField(required=False)\n\n def get_data_count(self, collection):\n \"\"\"Return number of data objects on the collection.\"\"\"\n # Use 'data_count' attribute when available. It is created in the\n # BaseCollectionViewSet class.\n return (\n collection.data_count\n if hasattr(collection, \"data_count\")\n else collection.data.count()\n )\n\n def get_status(self, collection):\n \"\"\"Return status of the collection based on the status of data objects.\n\n When collection contains no data objects None is returned.\n \"\"\"\n\n status_order = [\n Data.STATUS_ERROR,\n Data.STATUS_UPLOADING,\n Data.STATUS_PROCESSING,\n Data.STATUS_PREPARING,\n Data.STATUS_WAITING,\n Data.STATUS_RESOLVING,\n Data.STATUS_DONE,\n ]\n\n # Use 'data_statuses' attribute when available. It is created in the\n # BaseCollectionViewSet class. It contains all the distinct statuses of the\n # data objects in the collection.\n status_set = (\n set(collection.data_statuses)\n if hasattr(collection, \"data_statuses\")\n else collection.data.values_list(\"status\", flat=True).distinct()\n )\n\n if not status_set:\n return None\n\n for status in status_order:\n if status in status_set:\n return status\n\n logger.warning(\n \"Could not determine the status of a collection.\",\n extra={\"collection\": collection.__dict__},\n )\n return None\n\n class Meta:\n \"\"\"CollectionSerializer Meta options.\"\"\"\n\n model = Collection\n read_only_fields = (\n \"created\",\n \"descriptor_dirty\",\n \"duplicated\",\n \"id\",\n \"modified\",\n \"data_count\",\n \"status\",\n )\n update_protected_fields = (\"contributor\",)\n fields = (\n read_only_fields\n + update_protected_fields\n + (\n \"description\",\n \"descriptor\",\n \"descriptor_schema\",\n \"name\",\n \"settings\",\n \"slug\",\n \"tags\",\n )\n )\n\n\nclass CollectionSerializer(BaseCollectionSerializer):\n \"\"\"Serializer for Collection objects.\"\"\"\n\n entity_count = serializers.SerializerMethodField(required=False)\n\n def get_entity_count(self, collection):\n \"\"\"Return number of entities on the collection.\"\"\"\n # Use 'entity_count' attribute when available. It is created in the\n # BaseCollectionViewSet class.\n return (\n collection.entity_count\n if hasattr(collection, \"entity_count\")\n else collection.entity_set.count()\n )\n\n class Meta(BaseCollectionSerializer.Meta):\n \"\"\"CollectionSerializer Meta options.\"\"\"\n\n read_only_fields = BaseCollectionSerializer.Meta.read_only_fields + (\n \"entity_count\",\n )\n\n fields = BaseCollectionSerializer.Meta.fields + (\"entity_count\",)\n",
"step-ids": [
6,
8,
9,
11,
12
]
}
|
[
6,
8,
9,
11,
12
] |
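
The `get_data_count`/`get_status` methods in the row above prefer attributes precomputed by the viewset; a hedged sketch of the queryset side that would supply such an attribute looks like the following (the helper name and the `data` relation are assumptions, not taken from the row):

from django.db.models import Count

def with_data_count(queryset):
    # exposes .data_count on each collection so the serializer's
    # hasattr(collection, "data_count") branch avoids a per-row COUNT query
    return queryset.annotate(data_count=Count("data"))
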
#https://www.geeksforgeeks.org/count-of-substrings-of-length-k-with-exactly-k-distinct-characters/
#https://www.geeksforgeeks.org/count-number-of-substrings-with-exactly-k-distinct-characters/
|
normal
|
{
"blob_id": "2ca40a53291a62bbdb4386decc5a2dfa84431836",
"index": 6630,
"step-1": "#https://www.geeksforgeeks.org/count-of-substrings-of-length-k-with-exactly-k-distinct-characters/\n#https://www.geeksforgeeks.org/count-number-of-substrings-with-exactly-k-distinct-characters/\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
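
A minimal sketch of the first problem linked in the row above (counting substrings of length K with exactly K distinct characters), using a fixed-size sliding window over a character counter:

from collections import Counter

def count_k_distinct_windows(s, k):
    # slide a window of size k and check whether all k characters are distinct
    if k > len(s):
        return 0
    window = Counter(s[:k])
    total = int(len(window) == k)
    for i in range(k, len(s)):
        window[s[i]] += 1          # character entering the window
        window[s[i - k]] -= 1      # character leaving the window
        if window[s[i - k]] == 0:
            del window[s[i - k]]
        total += int(len(window) == k)
    return total

print(count_k_distinct_windows("abcbaa", 3))   # windows: abc, bcb, cba, baa -> 2
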
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('monitor', '0001_initial')]
operations = [migrations.RemoveField(model_name='cpu', name=
'deleted_ad'), migrations.RemoveField(model_name='disk', name=
'deleted_ad'), migrations.RemoveField(model_name='ram', name=
'deleted_ad'), migrations.AlterField(model_name='server', name=
'deleted_ad', field=models.DateTimeField(blank=True, default=None))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('monitor', '0001_initial')]
operations = [migrations.RemoveField(model_name='cpu', name=
'deleted_ad'), migrations.RemoveField(model_name='disk', name=
'deleted_ad'), migrations.RemoveField(model_name='ram', name=
'deleted_ad'), migrations.AlterField(model_name='server', name=
'deleted_ad', field=models.DateTimeField(blank=True, default=None))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-12-01 16:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('monitor', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='cpu',
name='deleted_ad',
),
migrations.RemoveField(
model_name='disk',
name='deleted_ad',
),
migrations.RemoveField(
model_name='ram',
name='deleted_ad',
),
migrations.AlterField(
model_name='server',
name='deleted_ad',
field=models.DateTimeField(blank=True, default=None),
),
]
|
flexible
|
{
"blob_id": "1573af9cdf4817acbe80031e22489386ea7899cf",
"index": 4782,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('monitor', '0001_initial')]\n operations = [migrations.RemoveField(model_name='cpu', name=\n 'deleted_ad'), migrations.RemoveField(model_name='disk', name=\n 'deleted_ad'), migrations.RemoveField(model_name='ram', name=\n 'deleted_ad'), migrations.AlterField(model_name='server', name=\n 'deleted_ad', field=models.DateTimeField(blank=True, default=None))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('monitor', '0001_initial')]\n operations = [migrations.RemoveField(model_name='cpu', name=\n 'deleted_ad'), migrations.RemoveField(model_name='disk', name=\n 'deleted_ad'), migrations.RemoveField(model_name='ram', name=\n 'deleted_ad'), migrations.AlterField(model_name='server', name=\n 'deleted_ad', field=models.DateTimeField(blank=True, default=None))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.1 on 2017-12-01 16:51\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('monitor', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='cpu',\n name='deleted_ad',\n ),\n migrations.RemoveField(\n model_name='disk',\n name='deleted_ad',\n ),\n migrations.RemoveField(\n model_name='ram',\n name='deleted_ad',\n ),\n migrations.AlterField(\n model_name='server',\n name='deleted_ad',\n field=models.DateTimeField(blank=True, default=None),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def attttaaaaacccckkkk():
enemy = hero.findNearest(hero.findEnemies())
if enemy:
if enemy and hero.isReady('cleave'):
hero.cleave(enemy)
else:
hero.attack(enemy)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def attttaaaaacccckkkk():
enemy = hero.findNearest(hero.findEnemies())
if enemy:
if enemy and hero.isReady('cleave'):
hero.cleave(enemy)
else:
hero.attack(enemy)
while True:
hero.moveXY(35, 34)
attttaaaaacccckkkk()
hero.moveXY(47, 27)
attttaaaaacccckkkk()
hero.moveXY(60, 31)
attttaaaaacccckkkk()
<|reserved_special_token_1|>
#https://codecombat.com/play/level/village-champion
# Incoming munchkins! Defend the town!
# Define your own function to fight the enemy!
# In the function, find an enemy, then cleave or attack it.
def attttaaaaacccckkkk():
enemy = hero.findNearest(hero.findEnemies())
#enemy = hero.findNearestEnemy()
if enemy:
if enemy and hero.isReady('cleave'):
hero.cleave(enemy)
else:
hero.attack(enemy)
# Move between patrol points and call the function.
while True:
hero.moveXY(35, 34)
# Use whatever function name you defined above.
attttaaaaacccckkkk()
hero.moveXY(47, 27)
# Call the function again.
attttaaaaacccckkkk()
hero.moveXY(60, 31)
# Call the function again.
attttaaaaacccckkkk()
|
flexible
|
{
"blob_id": "ce365e011d8cc88d9aa6b4df18ea3f4e70d48f5c",
"index": 4887,
"step-1": "<mask token>\n",
"step-2": "def attttaaaaacccckkkk():\n enemy = hero.findNearest(hero.findEnemies())\n if enemy:\n if enemy and hero.isReady('cleave'):\n hero.cleave(enemy)\n else:\n hero.attack(enemy)\n\n\n<mask token>\n",
"step-3": "def attttaaaaacccckkkk():\n enemy = hero.findNearest(hero.findEnemies())\n if enemy:\n if enemy and hero.isReady('cleave'):\n hero.cleave(enemy)\n else:\n hero.attack(enemy)\n\n\nwhile True:\n hero.moveXY(35, 34)\n attttaaaaacccckkkk()\n hero.moveXY(47, 27)\n attttaaaaacccckkkk()\n hero.moveXY(60, 31)\n attttaaaaacccckkkk()\n",
"step-4": "#https://codecombat.com/play/level/village-champion\n# Incoming munchkins! Defend the town!\n\n# Define your own function to fight the enemy!\n# In the function, find an enemy, then cleave or attack it.\ndef attttaaaaacccckkkk():\n enemy = hero.findNearest(hero.findEnemies())\n #enemy = hero.findNearestEnemy()\n if enemy:\n if enemy and hero.isReady('cleave'):\n hero.cleave(enemy)\n else:\n hero.attack(enemy)\n\n# Move between patrol points and call the function.\nwhile True:\n hero.moveXY(35, 34)\n # Use whatever function name you defined above.\n attttaaaaacccckkkk()\n hero.moveXY(47, 27)\n # Call the function again.\n attttaaaaacccckkkk()\n hero.moveXY(60, 31)\n # Call the function again.\n attttaaaaacccckkkk()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .routes import generate_routes
|
flexible
|
{
"blob_id": "06339e9cd506f147d03c54aee82473e233b4ec2e",
"index": 8853,
"step-1": "<mask token>\n",
"step-2": "from .routes import generate_routes\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def get_user(user_id=None, **kwargs):
if user_id is not None:
return User.query.get(user_id)
username = kwargs.pop('username')
if username is not None:
return User.query.filter_by(username=username).first()
raise NotImplementedError
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_user(username):
user = User(username)
db.session.add(user)
return user
def get_user(user_id=None, **kwargs):
if user_id is not None:
return User.query.get(user_id)
username = kwargs.pop('username')
if username is not None:
return User.query.filter_by(username=username).first()
raise NotImplementedError
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_user(username):
user = User(username)
db.session.add(user)
return user
def get_user(user_id=None, **kwargs):
if user_id is not None:
return User.query.get(user_id)
username = kwargs.pop('username')
if username is not None:
return User.query.filter_by(username=username).first()
raise NotImplementedError
def get_user_like(query):
    return User.query.filter(or_(User.username.like('%' + query + '%'))
        ).limit(10).all()
<|reserved_special_token_1|>
from sqlalchemy import or_
from ..extensions import db
from .models import User
def create_user(username):
user = User(username)
db.session.add(user)
return user
def get_user(user_id=None, **kwargs):
if user_id is not None:
return User.query.get(user_id)
username = kwargs.pop('username')
if username is not None:
return User.query.filter_by(username=username).first()
raise NotImplementedError
def get_user_like(query):
    return User.query.filter(or_(User.username.like('%' + query + '%'))
        ).limit(10).all()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from sqlalchemy import or_
from ..extensions import db
from .models import User
def create_user(username):
user = User(username)
db.session.add(user)
return user
def get_user(user_id=None, **kwargs):
if user_id is not None:
return User.query.get(user_id)
username = kwargs.pop("username")
if username is not None:
return User.query.filter_by(username=username).first()
raise NotImplementedError
def get_user_like(query):
    return User.query.filter(or_(User.username.like('%'+query+'%'))).limit(10).all()
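# Editor's note (illustrative, not part of the original module): or_() only pays
# off once several clauses are combined; with a single column the filter above
# could equally be written without it, e.g.
#   User.query.filter(User.username.like('%' + query + '%')).limit(10).all()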
|
flexible
|
{
"blob_id": "49c15f89225bb1dd1010510fe28dba34f6a8d085",
"index": 4866,
"step-1": "<mask token>\n\n\ndef get_user(user_id=None, **kwargs):\n if user_id is not None:\n return User.query.get(user_id)\n username = kwargs.pop('username')\n if username is not None:\n return User.query.filter_by(username=username).first()\n raise NotImplementedError\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_user(username):\n user = User(username)\n db.session.add(user)\n return user\n\n\ndef get_user(user_id=None, **kwargs):\n if user_id is not None:\n return User.query.get(user_id)\n username = kwargs.pop('username')\n if username is not None:\n return User.query.filter_by(username=username).first()\n raise NotImplementedError\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef create_user(username):\n user = User(username)\n db.session.add(user)\n return user\n\n\ndef get_user(user_id=None, **kwargs):\n if user_id is not None:\n return User.query.get(user_id)\n username = kwargs.pop('username')\n if username is not None:\n return User.query.filter_by(username=username).first()\n raise NotImplementedError\n\n\ndef get_user_like(query):\n return User.query.filter(or_(User.username, like('%' + query + '%'))\n ).limit(10).all()\n",
"step-4": "from sqlalchemy import or_\nfrom ..extensions import db\nfrom .models import User\n\n\ndef create_user(username):\n user = User(username)\n db.session.add(user)\n return user\n\n\ndef get_user(user_id=None, **kwargs):\n if user_id is not None:\n return User.query.get(user_id)\n username = kwargs.pop('username')\n if username is not None:\n return User.query.filter_by(username=username).first()\n raise NotImplementedError\n\n\ndef get_user_like(query):\n return User.query.filter(or_(User.username, like('%' + query + '%'))\n ).limit(10).all()\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom sqlalchemy import or_\n\nfrom ..extensions import db \nfrom .models import User\n\ndef create_user(username):\n user = User(username)\n db.session.add(user)\n return user\n\ndef get_user(user_id=None, **kwargs):\n if user_id is not None:\n return User.query.get(user_id)\n username = kwargs.pop(\"username\")\n if username is not None:\n return User.query.filter_by(username=username).first()\n\n raise NotImplementedError\n\ndef get_user_like(query):\n return User.query.filter(or_(User.username,like('%'+query+'%'))).limit(10).all()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# ============================================================================
# File    : cnn_sisben.py
# Authors : Johan S. Mendez, Jose D. Mendez
# Date    : 27/Aug/2020
# Classification of beneficiaries under the new Sisben classification system,
# grouped into 4 large beneficiary groups; a simple neural network is used
# for the first multi-output classification model
# ============================================================================
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv1D
# from keras.callbacks import ModelCheckpoint
# from keras.models import model_from_json
# from keras import backend as K
# from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
print("\033[91m Lectura de datos \033[0m")
#************ Preparing the data ************#
# Read the data
path = "/Users/johan/Documents/GitHub/Cafe/bases_datos/base_maestra_sisben_iv.csv"
df = pd.read_csv(path)
# Eliminate the missing values for dataset
df = df.dropna()
# Choose a subset of the complete data
df_sample = df.sample(n=150000, random_state=123)
# Preparing the data to the model
X = df_sample.drop("sisben_iv", axis=1)
y = df_sample["sisben_iv"]
y = pd.get_dummies(y, dtype=float)
# We separate the categorical and numerical data in diferente variables
X_categorical = X.select_dtypes(include=["int", "object"])
X_categorical = pd.get_dummies(X,
columns=X_categorical.columns,
dtype=float)
x_train, x_test, y_train, y_test = train_test_split(np.asarray(X_categorical),
np.asarray(y),
test_size=0.33,
shuffle=True)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)
model = Sequential()
model.add(Conv1D(64, (32),
input_shape=(x_train.shape[1], 1),
activation='elu'))
model.add(Flatten())
model.add(Dense(32, activation='elu'))
model.add(Dense(4, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer="Adam",
metrics=[tf.keras.metrics.CategoricalAccuracy()],
)
print(model.summary())
batch_size = 128
epochs = 100
model = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
plt.plot(model.history['loss'])
plt.plot(model.history['val_loss'])
plt.title('model train vs validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
plt.plot(model.history['categorical_accuracy'])
plt.plot(model.history['val_categorical_accuracy'])
plt.title('model train vs validation categorical_accuracy')
plt.ylabel('categorical_accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
|
normal
|
{
"blob_id": "bfb52a5ee6d88d63c4ef89dae26bb8cbecb091c6",
"index": 4200,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nseed(1)\n<mask token>\nset_random_seed(2)\nprint('\\x1b[91m Lectura de datos \\x1b[0m')\n<mask token>\nmodel.add(Conv1D(64, 32, input_shape=(x_train.shape[1], 1), activation='elu'))\nmodel.add(Flatten())\nmodel.add(Dense(32, activation='elu'))\nmodel.add(Dense(4, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=[\n tf.keras.metrics.CategoricalAccuracy()])\nprint(model.summary())\n<mask token>\nplt.plot(model.history['loss'])\nplt.plot(model.history['val_loss'])\nplt.title('model train vs validation loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper right')\nplt.show()\nplt.plot(model.history['categorical_accuracy'])\nplt.plot(model.history['val_categorical_accuracy'])\nplt.title('model train vs validation categorical_accuracy')\nplt.ylabel('categorical_accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper right')\nplt.show()\n",
"step-3": "<mask token>\nseed(1)\n<mask token>\nset_random_seed(2)\nprint('\\x1b[91m Lectura de datos \\x1b[0m')\npath = (\n '/Users/johan/Documents/GitHub/Cafe/bases_datos/base_maestra_sisben_iv.csv'\n )\ndf = pd.read_csv(path)\ndf = df.dropna()\ndf_sample = df.sample(n=150000, random_state=123)\nX = df_sample.drop('sisben_iv', axis=1)\ny = df_sample['sisben_iv']\ny = pd.get_dummies(y, dtype=float)\nX_categorical = X.select_dtypes(include=['int', 'object'])\nX_categorical = pd.get_dummies(X, columns=X_categorical.columns, dtype=float)\nx_train, x_test, y_train, y_test = train_test_split(np.asarray(\n X_categorical), np.asarray(y), test_size=0.33, shuffle=True)\nx_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)\nx_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)\nmodel = Sequential()\nmodel.add(Conv1D(64, 32, input_shape=(x_train.shape[1], 1), activation='elu'))\nmodel.add(Flatten())\nmodel.add(Dense(32, activation='elu'))\nmodel.add(Dense(4, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=[\n tf.keras.metrics.CategoricalAccuracy()])\nprint(model.summary())\nbatch_size = 128\nepochs = 100\nmodel = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,\n verbose=1, validation_data=(x_test, y_test))\nplt.plot(model.history['loss'])\nplt.plot(model.history['val_loss'])\nplt.title('model train vs validation loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper right')\nplt.show()\nplt.plot(model.history['categorical_accuracy'])\nplt.plot(model.history['val_categorical_accuracy'])\nplt.title('model train vs validation categorical_accuracy')\nplt.ylabel('categorical_accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper right')\nplt.show()\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Conv1D\nfrom sklearn.model_selection import train_test_split\nfrom numpy.random import seed\nseed(1)\nfrom tensorflow import set_random_seed\nset_random_seed(2)\nprint('\\x1b[91m Lectura de datos \\x1b[0m')\npath = (\n '/Users/johan/Documents/GitHub/Cafe/bases_datos/base_maestra_sisben_iv.csv'\n )\ndf = pd.read_csv(path)\ndf = df.dropna()\ndf_sample = df.sample(n=150000, random_state=123)\nX = df_sample.drop('sisben_iv', axis=1)\ny = df_sample['sisben_iv']\ny = pd.get_dummies(y, dtype=float)\nX_categorical = X.select_dtypes(include=['int', 'object'])\nX_categorical = pd.get_dummies(X, columns=X_categorical.columns, dtype=float)\nx_train, x_test, y_train, y_test = train_test_split(np.asarray(\n X_categorical), np.asarray(y), test_size=0.33, shuffle=True)\nx_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)\nx_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)\nmodel = Sequential()\nmodel.add(Conv1D(64, 32, input_shape=(x_train.shape[1], 1), activation='elu'))\nmodel.add(Flatten())\nmodel.add(Dense(32, activation='elu'))\nmodel.add(Dense(4, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=[\n tf.keras.metrics.CategoricalAccuracy()])\nprint(model.summary())\nbatch_size = 128\nepochs = 100\nmodel = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,\n verbose=1, validation_data=(x_test, y_test))\nplt.plot(model.history['loss'])\nplt.plot(model.history['val_loss'])\nplt.title('model train vs validation loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper right')\nplt.show()\nplt.plot(model.history['categorical_accuracy'])\nplt.plot(model.history['val_categorical_accuracy'])\nplt.title('model train vs validation categorical_accuracy')\nplt.ylabel('categorical_accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper right')\nplt.show()\n",
"step-5": "# ============================================================================\n# Archivo cnn_sisben.py\n# autor Johan S. Mendez, Jose D. Mendez\n# fecha 27/Agos/2020\n\n# Clasificacion de beneficiarios del nuevo sistema de clasificacion del sisben\n# agrupado en 4 grandes grupos de beneficiarios, se utiliza un red neuronal\n# simple para el primero modelo de clasificación de salida multiple\n\n\n# ============================================================================\n\n\nimport tensorflow as tf\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Conv1D\n# from keras.callbacks import ModelCheckpoint\n# from keras.models import model_from_json\n# from keras import backend as K\n# from keras.utils import to_categorical\n\nfrom sklearn.model_selection import train_test_split\n\nfrom numpy.random import seed\nseed(1)\nfrom tensorflow import set_random_seed\nset_random_seed(2)\n\n\nprint(\"\\033[91m Lectura de datos \\033[0m\")\n#************ Preparing the data ************#\n\n# Read the data\npath = \"/Users/johan/Documents/GitHub/Cafe/bases_datos/base_maestra_sisben_iv.csv\"\ndf = pd.read_csv(path)\n# Eliminate the missing values for dataset\ndf = df.dropna()\n# Choose a subset of the complete data\ndf_sample = df.sample(n=150000, random_state=123)\n\n# Preparing the data to the model\nX = df_sample.drop(\"sisben_iv\", axis=1)\ny = df_sample[\"sisben_iv\"]\ny = pd.get_dummies(y, dtype=float)\n\n# We separate the categorical and numerical data in diferente variables\nX_categorical = X.select_dtypes(include=[\"int\", \"object\"])\nX_categorical = pd.get_dummies(X,\n columns=X_categorical.columns,\n dtype=float)\n\nx_train, x_test, y_train, y_test = train_test_split(np.asarray(X_categorical),\n np.asarray(y),\n test_size=0.33,\n shuffle=True)\n\n\nx_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)\nx_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)\n\nmodel = Sequential()\n\nmodel.add(Conv1D(64, (32),\n input_shape=(x_train.shape[1], 1),\n activation='elu'))\nmodel.add(Flatten())\nmodel.add(Dense(32, activation='elu'))\nmodel.add(Dense(4, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy',\n optimizer=\"Adam\",\n metrics=[tf.keras.metrics.CategoricalAccuracy()],\n )\n\nprint(model.summary())\n\nbatch_size = 128\nepochs = 100\nmodel = model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\n\n\nplt.plot(model.history['loss'])\nplt.plot(model.history['val_loss'])\nplt.title('model train vs validation loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper right')\nplt.show()\n\nplt.plot(model.history['categorical_accuracy'])\nplt.plot(model.history['val_categorical_accuracy'])\nplt.title('model train vs validation categorical_accuracy')\nplt.ylabel('categorical_accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper right')\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class NumericKeyboard(Bubble):
def on_touch_up(self, touch):
app = App.get_running_app()
if not self.collide_point(*touch.pos
) and not self.parent.collide_point(*touch.pos):
self.parent.remove_widget(self.parent.bubb)
app.root.ids.registerScreen.ids.input_field.focus = False
delattr(app.root.ids.registerScreen.ids.input_field.parent, 'bubb')
<|reserved_special_token_0|>
def create_bubble_button(self):
numeric_keypad = ['7', '8', '9', '4', '5', '6', '1', '2', '3', '0',
'', '<-']
for x in numeric_keypad:
if x == '':
bubb_btn = CustomBubbleButton(disabled=True, text=str(x),
font_name='zekton__.ttf', bold=True, font_size='20sp')
else:
bubb_btn = CustomBubbleButton(text=str(x), font_name=
'zekton__.ttf', bold=True, font_size='20sp')
self.numeric_keyboard_layout.add_widget(bubb_btn)
class ShowInputBubble(FloatLayout):
def show_bubble(self, *l):
if not hasattr(self, 'bubb'):
self.bubb = NumericKeyboard()
self.bubb.arrow_pos = 'top_mid'
self.add_widget(self.bubb)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PanelInfoLabel(BoxLayout):
<|reserved_special_token_0|>
pass
class CustomBubbleButton(BubbleButton):
def add_text(self):
app = App.get_running_app()
index = app.root.ids.registerScreen.ids.input_field.cursor[0] - 1
if self.text != '<-':
app.root.ids.registerScreen.ids.input_field.text = (app.root.
ids.registerScreen.ids.input_field.text[:index + 1] + self.
text + app.root.ids.registerScreen.ids.input_field.text[
index + 1:])
app.root.ids.registerScreen.ids.input_field.cursor = index + 2, 0
else:
app.root.ids.registerScreen.ids.input_field.text = (app.root.
ids.registerScreen.ids.input_field.text[:index] + app.root.
ids.registerScreen.ids.input_field.text[index + 1:] if
index != -1 and app.root.ids.registerScreen.ids.input_field
.cursor != (0, 0) else app.root.ids.registerScreen.ids.
input_field.text)
app.root.ids.registerScreen.ids.input_field.cursor = index, 0
pass
class NumericKeyboard(Bubble):
def on_touch_up(self, touch):
app = App.get_running_app()
if not self.collide_point(*touch.pos
) and not self.parent.collide_point(*touch.pos):
self.parent.remove_widget(self.parent.bubb)
app.root.ids.registerScreen.ids.input_field.focus = False
delattr(app.root.ids.registerScreen.ids.input_field.parent, 'bubb')
def __init__(self, **kwargs):
super(NumericKeyboard, self).__init__(**kwargs)
self.create_bubble_button()
def create_bubble_button(self):
numeric_keypad = ['7', '8', '9', '4', '5', '6', '1', '2', '3', '0',
'', '<-']
for x in numeric_keypad:
if x == '':
bubb_btn = CustomBubbleButton(disabled=True, text=str(x),
font_name='zekton__.ttf', bold=True, font_size='20sp')
else:
bubb_btn = CustomBubbleButton(text=str(x), font_name=
'zekton__.ttf', bold=True, font_size='20sp')
self.numeric_keyboard_layout.add_widget(bubb_btn)
class ShowInputBubble(FloatLayout):
def show_bubble(self, *l):
if not hasattr(self, 'bubb'):
self.bubb = NumericKeyboard()
self.bubb.arrow_pos = 'top_mid'
self.add_widget(self.bubb)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PanelInfoLabel(BoxLayout):
"""
Customized `Label` to show personal/credential informations of the admin
"""
pass
class CustomBubbleButton(BubbleButton):
def add_text(self):
app = App.get_running_app()
index = app.root.ids.registerScreen.ids.input_field.cursor[0] - 1
if self.text != '<-':
app.root.ids.registerScreen.ids.input_field.text = (app.root.
ids.registerScreen.ids.input_field.text[:index + 1] + self.
text + app.root.ids.registerScreen.ids.input_field.text[
index + 1:])
app.root.ids.registerScreen.ids.input_field.cursor = index + 2, 0
else:
app.root.ids.registerScreen.ids.input_field.text = (app.root.
ids.registerScreen.ids.input_field.text[:index] + app.root.
ids.registerScreen.ids.input_field.text[index + 1:] if
index != -1 and app.root.ids.registerScreen.ids.input_field
.cursor != (0, 0) else app.root.ids.registerScreen.ids.
input_field.text)
app.root.ids.registerScreen.ids.input_field.cursor = index, 0
pass
class NumericKeyboard(Bubble):
def on_touch_up(self, touch):
app = App.get_running_app()
if not self.collide_point(*touch.pos
) and not self.parent.collide_point(*touch.pos):
self.parent.remove_widget(self.parent.bubb)
app.root.ids.registerScreen.ids.input_field.focus = False
delattr(app.root.ids.registerScreen.ids.input_field.parent, 'bubb')
def __init__(self, **kwargs):
super(NumericKeyboard, self).__init__(**kwargs)
self.create_bubble_button()
def create_bubble_button(self):
numeric_keypad = ['7', '8', '9', '4', '5', '6', '1', '2', '3', '0',
'', '<-']
for x in numeric_keypad:
if x == '':
bubb_btn = CustomBubbleButton(disabled=True, text=str(x),
font_name='zekton__.ttf', bold=True, font_size='20sp')
else:
bubb_btn = CustomBubbleButton(text=str(x), font_name=
'zekton__.ttf', bold=True, font_size='20sp')
self.numeric_keyboard_layout.add_widget(bubb_btn)
class ShowInputBubble(FloatLayout):
def show_bubble(self, *l):
if not hasattr(self, 'bubb'):
self.bubb = NumericKeyboard()
self.bubb.arrow_pos = 'top_mid'
self.add_widget(self.bubb)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TotalsInfoLabel(BoxLayout):
<|reserved_special_token_0|>
pass
class PanelInfoLabel(BoxLayout):
"""
Customized `Label` to show personal/credential informations of the admin
"""
pass
class CustomBubbleButton(BubbleButton):
def add_text(self):
app = App.get_running_app()
index = app.root.ids.registerScreen.ids.input_field.cursor[0] - 1
if self.text != '<-':
app.root.ids.registerScreen.ids.input_field.text = (app.root.
ids.registerScreen.ids.input_field.text[:index + 1] + self.
text + app.root.ids.registerScreen.ids.input_field.text[
index + 1:])
app.root.ids.registerScreen.ids.input_field.cursor = index + 2, 0
else:
app.root.ids.registerScreen.ids.input_field.text = (app.root.
ids.registerScreen.ids.input_field.text[:index] + app.root.
ids.registerScreen.ids.input_field.text[index + 1:] if
index != -1 and app.root.ids.registerScreen.ids.input_field
.cursor != (0, 0) else app.root.ids.registerScreen.ids.
input_field.text)
app.root.ids.registerScreen.ids.input_field.cursor = index, 0
pass
class NumericKeyboard(Bubble):
def on_touch_up(self, touch):
app = App.get_running_app()
if not self.collide_point(*touch.pos
) and not self.parent.collide_point(*touch.pos):
self.parent.remove_widget(self.parent.bubb)
app.root.ids.registerScreen.ids.input_field.focus = False
delattr(app.root.ids.registerScreen.ids.input_field.parent, 'bubb')
def __init__(self, **kwargs):
super(NumericKeyboard, self).__init__(**kwargs)
self.create_bubble_button()
def create_bubble_button(self):
numeric_keypad = ['7', '8', '9', '4', '5', '6', '1', '2', '3', '0',
'', '<-']
for x in numeric_keypad:
if x == '':
bubb_btn = CustomBubbleButton(disabled=True, text=str(x),
font_name='zekton__.ttf', bold=True, font_size='20sp')
else:
bubb_btn = CustomBubbleButton(text=str(x), font_name=
'zekton__.ttf', bold=True, font_size='20sp')
self.numeric_keyboard_layout.add_widget(bubb_btn)
class ShowInputBubble(FloatLayout):
def show_bubble(self, *l):
if not hasattr(self, 'bubb'):
self.bubb = NumericKeyboard()
self.bubb.arrow_pos = 'top_mid'
self.add_widget(self.bubb)
<|reserved_special_token_1|>
#_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-#
# PROJECT : RegCEl - Registro para el Consumo Eléctrico #
# VERSION : 1.2 #
# AUTHOR : Yunior Barceló Chávez [email protected] #
# DATE : 9/01/2021 #
#_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-#
"""
This file contains different customized widgets
Available classes:
-----------------
- HoverOneLineListItem
- LabelForList
- LabelForListStudent
- AdminInfoLabel
- AdminInfoEditField
- CustomRecycleView
"""
from kivymd.uix.list import OneLineListItem
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.recycleview import RecycleView
from kivy.uix.bubble import Bubble, BubbleButton
from hoverable import HoverBehavior
from kivy.uix.floatlayout import FloatLayout
from kivy.app import App
class LabelForList(Label):
"""
This class creates universal label to be used in list items across this application
"""
pass
class TotalsInfoLabel(BoxLayout):
"""
Customized `Label` to show personal/credential informations of the admin
"""
pass
class PanelInfoLabel(BoxLayout):
"""
Customized `Label` to show personal/credential informations of the admin
"""
pass
class CustomBubbleButton(BubbleButton):
def add_text(self):
app= App.get_running_app()
index=app.root.ids.registerScreen.ids.input_field.cursor[0]-1
if self.text!="<-":
app.root.ids.registerScreen.ids.input_field.text=app.root.ids.registerScreen.ids.input_field.text[:index+1]+self.text + app.root.ids.registerScreen.ids.input_field.text[index+1:]
app.root.ids.registerScreen.ids.input_field.cursor=(index+2,0)
else:
app.root.ids.registerScreen.ids.input_field.text=app.root.ids.registerScreen.ids.input_field.text[:index] + app.root.ids.registerScreen.ids.input_field.text[index+1:] if index != -1 and app.root.ids.registerScreen.ids.input_field.cursor != (0,0) else app.root.ids.registerScreen.ids.input_field.text
app.root.ids.registerScreen.ids.input_field.cursor=(index,0)
pass
class NumericKeyboard(Bubble):
def on_touch_up(self, touch):
app= App.get_running_app()
if not self.collide_point(*touch.pos) and not self.parent.collide_point(*touch.pos):
self.parent.remove_widget(self.parent.bubb)
app.root.ids.registerScreen.ids.input_field.focus=False
delattr(app.root.ids.registerScreen.ids.input_field.parent, 'bubb')
def __init__(self, **kwargs):
super(NumericKeyboard, self).__init__(**kwargs)
self.create_bubble_button()
def create_bubble_button(self):
numeric_keypad = ['7', '8', '9', '4', '5', '6', '1', '2', '3', '0', '', '<-']
for x in numeric_keypad:
if x == '':
bubb_btn = CustomBubbleButton(disabled=True, text=str(x),font_name='zekton__.ttf', bold=True, font_size="20sp")
else:
bubb_btn = CustomBubbleButton(text=str(x),font_name='zekton__.ttf', bold=True, font_size="20sp")
self.numeric_keyboard_layout.add_widget(bubb_btn)
class ShowInputBubble(FloatLayout):
def show_bubble(self, *l):
if not hasattr(self, 'bubb'):
self.bubb = NumericKeyboard()
self.bubb.arrow_pos = "top_mid"
self.add_widget(self.bubb)
|
flexible
|
{
"blob_id": "7da8a074704b1851ac352477ef72a4c11cea1a0b",
"index": 6737,
"step-1": "<mask token>\n\n\nclass NumericKeyboard(Bubble):\n\n def on_touch_up(self, touch):\n app = App.get_running_app()\n if not self.collide_point(*touch.pos\n ) and not self.parent.collide_point(*touch.pos):\n self.parent.remove_widget(self.parent.bubb)\n app.root.ids.registerScreen.ids.input_field.focus = False\n delattr(app.root.ids.registerScreen.ids.input_field.parent, 'bubb')\n <mask token>\n\n def create_bubble_button(self):\n numeric_keypad = ['7', '8', '9', '4', '5', '6', '1', '2', '3', '0',\n '', '<-']\n for x in numeric_keypad:\n if x == '':\n bubb_btn = CustomBubbleButton(disabled=True, text=str(x),\n font_name='zekton__.ttf', bold=True, font_size='20sp')\n else:\n bubb_btn = CustomBubbleButton(text=str(x), font_name=\n 'zekton__.ttf', bold=True, font_size='20sp')\n self.numeric_keyboard_layout.add_widget(bubb_btn)\n\n\nclass ShowInputBubble(FloatLayout):\n\n def show_bubble(self, *l):\n if not hasattr(self, 'bubb'):\n self.bubb = NumericKeyboard()\n self.bubb.arrow_pos = 'top_mid'\n self.add_widget(self.bubb)\n",
"step-2": "<mask token>\n\n\nclass PanelInfoLabel(BoxLayout):\n <mask token>\n pass\n\n\nclass CustomBubbleButton(BubbleButton):\n\n def add_text(self):\n app = App.get_running_app()\n index = app.root.ids.registerScreen.ids.input_field.cursor[0] - 1\n if self.text != '<-':\n app.root.ids.registerScreen.ids.input_field.text = (app.root.\n ids.registerScreen.ids.input_field.text[:index + 1] + self.\n text + app.root.ids.registerScreen.ids.input_field.text[\n index + 1:])\n app.root.ids.registerScreen.ids.input_field.cursor = index + 2, 0\n else:\n app.root.ids.registerScreen.ids.input_field.text = (app.root.\n ids.registerScreen.ids.input_field.text[:index] + app.root.\n ids.registerScreen.ids.input_field.text[index + 1:] if \n index != -1 and app.root.ids.registerScreen.ids.input_field\n .cursor != (0, 0) else app.root.ids.registerScreen.ids.\n input_field.text)\n app.root.ids.registerScreen.ids.input_field.cursor = index, 0\n pass\n\n\nclass NumericKeyboard(Bubble):\n\n def on_touch_up(self, touch):\n app = App.get_running_app()\n if not self.collide_point(*touch.pos\n ) and not self.parent.collide_point(*touch.pos):\n self.parent.remove_widget(self.parent.bubb)\n app.root.ids.registerScreen.ids.input_field.focus = False\n delattr(app.root.ids.registerScreen.ids.input_field.parent, 'bubb')\n\n def __init__(self, **kwargs):\n super(NumericKeyboard, self).__init__(**kwargs)\n self.create_bubble_button()\n\n def create_bubble_button(self):\n numeric_keypad = ['7', '8', '9', '4', '5', '6', '1', '2', '3', '0',\n '', '<-']\n for x in numeric_keypad:\n if x == '':\n bubb_btn = CustomBubbleButton(disabled=True, text=str(x),\n font_name='zekton__.ttf', bold=True, font_size='20sp')\n else:\n bubb_btn = CustomBubbleButton(text=str(x), font_name=\n 'zekton__.ttf', bold=True, font_size='20sp')\n self.numeric_keyboard_layout.add_widget(bubb_btn)\n\n\nclass ShowInputBubble(FloatLayout):\n\n def show_bubble(self, *l):\n if not hasattr(self, 'bubb'):\n self.bubb = NumericKeyboard()\n self.bubb.arrow_pos = 'top_mid'\n self.add_widget(self.bubb)\n",
"step-3": "<mask token>\n\n\nclass PanelInfoLabel(BoxLayout):\n \"\"\"\n Customized `Label` to show personal/credential informations of the admin\n \"\"\"\n pass\n\n\nclass CustomBubbleButton(BubbleButton):\n\n def add_text(self):\n app = App.get_running_app()\n index = app.root.ids.registerScreen.ids.input_field.cursor[0] - 1\n if self.text != '<-':\n app.root.ids.registerScreen.ids.input_field.text = (app.root.\n ids.registerScreen.ids.input_field.text[:index + 1] + self.\n text + app.root.ids.registerScreen.ids.input_field.text[\n index + 1:])\n app.root.ids.registerScreen.ids.input_field.cursor = index + 2, 0\n else:\n app.root.ids.registerScreen.ids.input_field.text = (app.root.\n ids.registerScreen.ids.input_field.text[:index] + app.root.\n ids.registerScreen.ids.input_field.text[index + 1:] if \n index != -1 and app.root.ids.registerScreen.ids.input_field\n .cursor != (0, 0) else app.root.ids.registerScreen.ids.\n input_field.text)\n app.root.ids.registerScreen.ids.input_field.cursor = index, 0\n pass\n\n\nclass NumericKeyboard(Bubble):\n\n def on_touch_up(self, touch):\n app = App.get_running_app()\n if not self.collide_point(*touch.pos\n ) and not self.parent.collide_point(*touch.pos):\n self.parent.remove_widget(self.parent.bubb)\n app.root.ids.registerScreen.ids.input_field.focus = False\n delattr(app.root.ids.registerScreen.ids.input_field.parent, 'bubb')\n\n def __init__(self, **kwargs):\n super(NumericKeyboard, self).__init__(**kwargs)\n self.create_bubble_button()\n\n def create_bubble_button(self):\n numeric_keypad = ['7', '8', '9', '4', '5', '6', '1', '2', '3', '0',\n '', '<-']\n for x in numeric_keypad:\n if x == '':\n bubb_btn = CustomBubbleButton(disabled=True, text=str(x),\n font_name='zekton__.ttf', bold=True, font_size='20sp')\n else:\n bubb_btn = CustomBubbleButton(text=str(x), font_name=\n 'zekton__.ttf', bold=True, font_size='20sp')\n self.numeric_keyboard_layout.add_widget(bubb_btn)\n\n\nclass ShowInputBubble(FloatLayout):\n\n def show_bubble(self, *l):\n if not hasattr(self, 'bubb'):\n self.bubb = NumericKeyboard()\n self.bubb.arrow_pos = 'top_mid'\n self.add_widget(self.bubb)\n",
"step-4": "<mask token>\n\n\nclass TotalsInfoLabel(BoxLayout):\n <mask token>\n pass\n\n\nclass PanelInfoLabel(BoxLayout):\n \"\"\"\n Customized `Label` to show personal/credential informations of the admin\n \"\"\"\n pass\n\n\nclass CustomBubbleButton(BubbleButton):\n\n def add_text(self):\n app = App.get_running_app()\n index = app.root.ids.registerScreen.ids.input_field.cursor[0] - 1\n if self.text != '<-':\n app.root.ids.registerScreen.ids.input_field.text = (app.root.\n ids.registerScreen.ids.input_field.text[:index + 1] + self.\n text + app.root.ids.registerScreen.ids.input_field.text[\n index + 1:])\n app.root.ids.registerScreen.ids.input_field.cursor = index + 2, 0\n else:\n app.root.ids.registerScreen.ids.input_field.text = (app.root.\n ids.registerScreen.ids.input_field.text[:index] + app.root.\n ids.registerScreen.ids.input_field.text[index + 1:] if \n index != -1 and app.root.ids.registerScreen.ids.input_field\n .cursor != (0, 0) else app.root.ids.registerScreen.ids.\n input_field.text)\n app.root.ids.registerScreen.ids.input_field.cursor = index, 0\n pass\n\n\nclass NumericKeyboard(Bubble):\n\n def on_touch_up(self, touch):\n app = App.get_running_app()\n if not self.collide_point(*touch.pos\n ) and not self.parent.collide_point(*touch.pos):\n self.parent.remove_widget(self.parent.bubb)\n app.root.ids.registerScreen.ids.input_field.focus = False\n delattr(app.root.ids.registerScreen.ids.input_field.parent, 'bubb')\n\n def __init__(self, **kwargs):\n super(NumericKeyboard, self).__init__(**kwargs)\n self.create_bubble_button()\n\n def create_bubble_button(self):\n numeric_keypad = ['7', '8', '9', '4', '5', '6', '1', '2', '3', '0',\n '', '<-']\n for x in numeric_keypad:\n if x == '':\n bubb_btn = CustomBubbleButton(disabled=True, text=str(x),\n font_name='zekton__.ttf', bold=True, font_size='20sp')\n else:\n bubb_btn = CustomBubbleButton(text=str(x), font_name=\n 'zekton__.ttf', bold=True, font_size='20sp')\n self.numeric_keyboard_layout.add_widget(bubb_btn)\n\n\nclass ShowInputBubble(FloatLayout):\n\n def show_bubble(self, *l):\n if not hasattr(self, 'bubb'):\n self.bubb = NumericKeyboard()\n self.bubb.arrow_pos = 'top_mid'\n self.add_widget(self.bubb)\n",
"step-5": "#_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-#\n# PROJECT : RegCEl - Registro para el Consumo Eléctrico #\n# VERSION : 1.2 #\n# AUTHOR : Yunior Barceló Chávez [email protected] #\n# DATE : 9/01/2021 #\n#_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-#\n\"\"\"\nThis file contains different customized widgets\n\nAvailabe classes:\n-----------------\n - HoverOneLineListItem\n - LabelForList\n - LabelForListStudent\n - AdminInfoLabel\n - AdminInfoEditField\n - CustomRecycleView\n\n\"\"\"\n\nfrom kivymd.uix.list import OneLineListItem\nfrom kivy.uix.label import Label\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.recycleview import RecycleView\nfrom kivy.uix.bubble import Bubble, BubbleButton\nfrom hoverable import HoverBehavior\nfrom kivy.uix.floatlayout import FloatLayout\n\nfrom kivy.app import App\n\n\nclass LabelForList(Label):\n \"\"\"\n This class creates universal label to be used in list items across this application\n \"\"\"\n\n pass\n\nclass TotalsInfoLabel(BoxLayout):\n \"\"\"\n Customized `Label` to show personal/credential informations of the admin\n \"\"\"\n\n pass\n\nclass PanelInfoLabel(BoxLayout):\n \"\"\"\n Customized `Label` to show personal/credential informations of the admin\n \"\"\"\n\n pass\n\n\nclass CustomBubbleButton(BubbleButton):\n \n def add_text(self):\n app= App.get_running_app()\n index=app.root.ids.registerScreen.ids.input_field.cursor[0]-1\n \n if self.text!=\"<-\":\n app.root.ids.registerScreen.ids.input_field.text=app.root.ids.registerScreen.ids.input_field.text[:index+1]+self.text + app.root.ids.registerScreen.ids.input_field.text[index+1:]\n app.root.ids.registerScreen.ids.input_field.cursor=(index+2,0)\n else:\n app.root.ids.registerScreen.ids.input_field.text=app.root.ids.registerScreen.ids.input_field.text[:index] + app.root.ids.registerScreen.ids.input_field.text[index+1:] if index != -1 and app.root.ids.registerScreen.ids.input_field.cursor != (0,0) else app.root.ids.registerScreen.ids.input_field.text\n app.root.ids.registerScreen.ids.input_field.cursor=(index,0)\n \n pass\n\n\nclass NumericKeyboard(Bubble):\n \n def on_touch_up(self, touch):\n app= App.get_running_app()\n if not self.collide_point(*touch.pos) and not self.parent.collide_point(*touch.pos):\n self.parent.remove_widget(self.parent.bubb)\n app.root.ids.registerScreen.ids.input_field.focus=False\n delattr(app.root.ids.registerScreen.ids.input_field.parent, 'bubb') \n \n def __init__(self, **kwargs):\n super(NumericKeyboard, self).__init__(**kwargs)\n self.create_bubble_button()\n\n def create_bubble_button(self):\n numeric_keypad = ['7', '8', '9', '4', '5', '6', '1', '2', '3', '0', '', '<-']\n for x in numeric_keypad:\n if x == '':\n bubb_btn = CustomBubbleButton(disabled=True, text=str(x),font_name='zekton__.ttf', bold=True, font_size=\"20sp\")\n else: \n bubb_btn = CustomBubbleButton(text=str(x),font_name='zekton__.ttf', bold=True, font_size=\"20sp\")\n self.numeric_keyboard_layout.add_widget(bubb_btn)\n\n\nclass ShowInputBubble(FloatLayout): \n def show_bubble(self, *l):\n if not hasattr(self, 'bubb'):\n self.bubb = NumericKeyboard()\n self.bubb.arrow_pos = \"top_mid\"\n self.add_widget(self.bubb)",
"step-ids": [
5,
9,
10,
11,
16
]
}
|
[
5,
9,
10,
11,
16
] |
<|reserved_special_token_0|>
def get_grads_correct(seed):
util.set_seed(seed)
theta_grads_correct = []
phi_grads_correct = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
theta_grads_correct = [parameter.grad.clone() for parameter in
generative_model.parameters()]
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
phi_grads_correct = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_correct, phi_grads_correct
<|reserved_special_token_0|>
def get_grads_in_one_no_zeroing(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles=1, reparam=False):
"""Compute log weight and log prob of inference network.
Args:
generative_model: models.GenerativeModel object
inference_network: models.InferenceNetwork object
obs: tensor of shape [batch_size]
num_particles: int
reparam: reparameterize sampling from q (only applicable if z is
Concrete)
Returns:
log_weight: tensor of shape [batch_size, num_particles]
log_q: tensor of shape [batch_size, num_particles]
"""
latent_dist = inference_network.get_latent_dist(obs)
latent = inference_network.sample_from_latent_dist(latent_dist,
num_particles, reparam=reparam)
log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)
log_q = inference_network.get_log_prob_from_latent_dist(latent_dist, latent
).transpose(0, 1)
log_weight = log_p - log_q.detach()
return log_weight, log_q
def get_grads_weird_detach(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def are_tensors_equal(xs, ys):
return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_grads_correct(seed):
util.set_seed(seed)
theta_grads_correct = []
phi_grads_correct = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
theta_grads_correct = [parameter.grad.clone() for parameter in
generative_model.parameters()]
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
phi_grads_correct = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_correct, phi_grads_correct
def get_grads_in_one(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
optimizer_phi.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_grads_in_one_no_zeroing(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles=1, reparam=False):
"""Compute log weight and log prob of inference network.
Args:
generative_model: models.GenerativeModel object
inference_network: models.InferenceNetwork object
obs: tensor of shape [batch_size]
num_particles: int
reparam: reparameterize sampling from q (only applicable if z is
Concrete)
Returns:
log_weight: tensor of shape [batch_size, num_particles]
log_q: tensor of shape [batch_size, num_particles]
"""
latent_dist = inference_network.get_latent_dist(obs)
latent = inference_network.sample_from_latent_dist(latent_dist,
num_particles, reparam=reparam)
log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)
log_q = inference_network.get_log_prob_from_latent_dist(latent_dist, latent
).transpose(0, 1)
log_weight = log_p - log_q.detach()
return log_weight, log_q
def get_grads_weird_detach(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def are_tensors_equal(xs, ys):
return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
args = argparse.Namespace()
args.device = torch.device('cpu')
args.num_mixtures = 20
args.init_mixture_logits = np.ones(args.num_mixtures)
args.softmax_multiplier = 0.5
args.relaxed_one_hot = False
args.temperature = None
temp = np.arange(args.num_mixtures) + 5
true_p_mixture_probs = temp / np.sum(temp)
args.true_mixture_logits = np.log(true_p_mixture_probs
) / args.softmax_multiplier
args.seed = 1
util.set_seed(args.seed)
generative_model, inference_network, true_generative_model = util.init_models(
args)
optimizer_phi = torch.optim.Adam(inference_network.parameters())
optimizer_theta = torch.optim.Adam(generative_model.parameters())
batch_size = 3
num_particles = 4
obs = true_generative_model.sample_obs(batch_size)
def get_grads_correct(seed):
util.set_seed(seed)
theta_grads_correct = []
phi_grads_correct = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
theta_grads_correct = [parameter.grad.clone() for parameter in
generative_model.parameters()]
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
phi_grads_correct = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_correct, phi_grads_correct
def get_grads_in_one(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
optimizer_phi.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_grads_in_one_no_zeroing(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles=1, reparam=False):
"""Compute log weight and log prob of inference network.
Args:
generative_model: models.GenerativeModel object
inference_network: models.InferenceNetwork object
obs: tensor of shape [batch_size]
num_particles: int
reparam: reparameterize sampling from q (only applicable if z is
Concrete)
Returns:
log_weight: tensor of shape [batch_size, num_particles]
log_q: tensor of shape [batch_size, num_particles]
"""
latent_dist = inference_network.get_latent_dist(obs)
latent = inference_network.sample_from_latent_dist(latent_dist,
num_particles, reparam=reparam)
log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)
log_q = inference_network.get_log_prob_from_latent_dist(latent_dist, latent
).transpose(0, 1)
log_weight = log_p - log_q.detach()
return log_weight, log_q
def get_grads_weird_detach(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def are_tensors_equal(xs, ys):
return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])
seed = 1
grads_correct = sum(get_grads_correct(seed), [])
grads_in_one = sum(get_grads_in_one(seed), [])
grads_in_one_no_zeroing = sum(get_grads_in_one_no_zeroing(seed), [])
grads_weird_detach = sum(get_grads_weird_detach(seed), [])
print('Computing grads all at once is ok: {}'.format(are_tensors_equal(
grads_correct, grads_in_one)))
print('Computing grads all at once and not zeroing phi grads is ok: {}'.
format(are_tensors_equal(grads_correct, grads_in_one_no_zeroing)))
print('Computing grads with weird detach is ok: {}'.format(
are_tensors_equal(grads_correct, grads_weird_detach)))
<|reserved_special_token_1|>
import torch
import util
import numpy as np
import argparse
import losses
args = argparse.Namespace()
args.device = torch.device('cpu')
args.num_mixtures = 20
args.init_mixture_logits = np.ones(args.num_mixtures)
args.softmax_multiplier = 0.5
args.relaxed_one_hot = False
args.temperature = None
temp = np.arange(args.num_mixtures) + 5
true_p_mixture_probs = temp / np.sum(temp)
args.true_mixture_logits = np.log(true_p_mixture_probs
) / args.softmax_multiplier
args.seed = 1
util.set_seed(args.seed)
generative_model, inference_network, true_generative_model = util.init_models(
args)
optimizer_phi = torch.optim.Adam(inference_network.parameters())
optimizer_theta = torch.optim.Adam(generative_model.parameters())
batch_size = 3
num_particles = 4
obs = true_generative_model.sample_obs(batch_size)
def get_grads_correct(seed):
util.set_seed(seed)
theta_grads_correct = []
phi_grads_correct = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
theta_grads_correct = [parameter.grad.clone() for parameter in
generative_model.parameters()]
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
phi_grads_correct = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_correct, phi_grads_correct
def get_grads_in_one(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
optimizer_phi.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_grads_in_one_no_zeroing(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles=1, reparam=False):
"""Compute log weight and log prob of inference network.
Args:
generative_model: models.GenerativeModel object
inference_network: models.InferenceNetwork object
obs: tensor of shape [batch_size]
num_particles: int
reparam: reparameterize sampling from q (only applicable if z is
Concrete)
Returns:
log_weight: tensor of shape [batch_size, num_particles]
log_q: tensor of shape [batch_size, num_particles]
"""
latent_dist = inference_network.get_latent_dist(obs)
latent = inference_network.sample_from_latent_dist(latent_dist,
num_particles, reparam=reparam)
log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)
log_q = inference_network.get_log_prob_from_latent_dist(latent_dist, latent
).transpose(0, 1)
log_weight = log_p - log_q.detach()
return log_weight, log_q
def get_grads_weird_detach(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def are_tensors_equal(xs, ys):
return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])
seed = 1
grads_correct = sum(get_grads_correct(seed), [])
grads_in_one = sum(get_grads_in_one(seed), [])
grads_in_one_no_zeroing = sum(get_grads_in_one_no_zeroing(seed), [])
grads_weird_detach = sum(get_grads_weird_detach(seed), [])
print('Computing grads all at once is ok: {}'.format(are_tensors_equal(
grads_correct, grads_in_one)))
print('Computing grads all at once and not zeroing phi grads is ok: {}'.
format(are_tensors_equal(grads_correct, grads_in_one_no_zeroing)))
print('Computing grads with weird detach is ok: {}'.format(
are_tensors_equal(grads_correct, grads_weird_detach)))
<|reserved_special_token_1|>
import torch
import util
import numpy as np
import argparse
import losses
args = argparse.Namespace()
args.device = torch.device('cpu')
args.num_mixtures = 20
args.init_mixture_logits = np.ones(args.num_mixtures)
args.softmax_multiplier = 0.5
args.relaxed_one_hot = False
args.temperature = None
temp = np.arange(args.num_mixtures) + 5
true_p_mixture_probs = temp / np.sum(temp)
args.true_mixture_logits = \
np.log(true_p_mixture_probs) / args.softmax_multiplier
args.seed = 1
# init models
util.set_seed(args.seed)
generative_model, inference_network, true_generative_model = \
util.init_models(args)
optimizer_phi = torch.optim.Adam(inference_network.parameters())
optimizer_theta = torch.optim.Adam(generative_model.parameters())
batch_size = 3
num_particles = 4
obs = true_generative_model.sample_obs(batch_size)
def get_grads_correct(seed):
util.set_seed(seed)
theta_grads_correct = []
phi_grads_correct = []
log_weight, log_q = losses.get_log_weight_and_log_q(
generative_model, inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
theta_grads_correct = [parameter.grad.clone() for parameter in
generative_model.parameters()]
# in rws, we step as we compute the grads
# optimizer_theta.step()
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
phi_grads_correct = [parameter.grad.clone() for parameter in
inference_network.parameters()]
# in rws, we step as we compute the grads
# optimizer_phi.step()
return theta_grads_correct, phi_grads_correct
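# --- Editor's sketch (assumption, not taken from the losses module) -----------
# The two helpers used above are presumably the standard RWS estimators, roughly:
#
#   elbo = torch.logsumexp(log_weight, dim=1) - math.log(num_particles)
#   wake_theta_loss = -elbo.mean()
#   normalized_weight = torch.softmax(log_weight, dim=1).detach()
#   wake_phi_loss = -(normalized_weight * log_q).sum(dim=1).mean()
#
# This is why wake_theta_loss.backward(retain_graph=True) is needed: the same
# log_weight graph is reused by the wake-phi loss immediately afterwards.
# -------------------------------------------------------------------------------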
def get_grads_in_one(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(
generative_model, inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
optimizer_phi.zero_grad()
# optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
# only get the grads in the end!
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
# in pyro, we want step to be in a different stage
# optimizer_theta.step()
# optimizer_phi.step()
return theta_grads_in_one, phi_grads_in_one
def get_grads_in_one_no_zeroing(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(
generative_model, inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
# optimizer_phi.zero_grad() -> don't zero phi grads
# optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
# only get the grads in the end!
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
# in pyro, we want step to be in a different stage
# optimizer_theta.step()
# optimizer_phi.step()
return theta_grads_in_one, phi_grads_in_one
def get_log_weight_and_log_q_weird_detach(generative_model, inference_network, obs,
num_particles=1, reparam=False):
"""Compute log weight and log prob of inference network.
Args:
generative_model: models.GenerativeModel object
inference_network: models.InferenceNetwork object
obs: tensor of shape [batch_size]
num_particles: int
reparam: reparameterize sampling from q (only applicable if z is
Concrete)
Returns:
log_weight: tensor of shape [batch_size, num_particles]
log_q: tensor of shape [batch_size, num_particles]
"""
latent_dist = inference_network.get_latent_dist(obs)
latent = inference_network.sample_from_latent_dist(
latent_dist, num_particles, reparam=reparam)
log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)
log_q = inference_network.get_log_prob_from_latent_dist(
latent_dist, latent).transpose(0, 1)
log_weight = log_p - log_q.detach()
return log_weight, log_q
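# Annotation (not part of the original script): because log_weight above is
# built from log_q.detach(), backpropagating wake_theta_loss cannot reach the
# inference network. The variant below therefore skips
# optimizer_phi.zero_grad() before the wake-phi backward pass and still
# expects phi gradients identical to the reference -- exactly what the
# equality checks at the bottom of this file verify.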
def get_grads_weird_detach(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = get_log_weight_and_log_q_weird_detach(
generative_model, inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
# optimizer_phi.zero_grad() -> don't zero phi grads
# optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
# only get the grads in the end!
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
# in pyro, we want step to be in a different stage
# optimizer_theta.step()
# optimizer_phi.step()
return theta_grads_in_one, phi_grads_in_one
def are_tensors_equal(xs, ys):
return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])
seed = 1
grads_correct = sum(get_grads_correct(seed), [])
grads_in_one = sum(get_grads_in_one(seed), [])
grads_in_one_no_zeroing = sum(get_grads_in_one_no_zeroing(seed), [])
grads_weird_detach = sum(get_grads_weird_detach(seed), [])
# is computing grads all at once ok?
print('Computing grads all at once is ok: {}'.format(
are_tensors_equal(grads_correct, grads_in_one)))
print('Computing grads all at once and not zeroing phi grads is ok: {}'.format(
are_tensors_equal(grads_correct, grads_in_one_no_zeroing)))
print('Computing grads with weird detach is ok: {}'.format(
are_tensors_equal(grads_correct, grads_weird_detach)))
|
flexible
|
{
"blob_id": "8f558593e516aa4a769b7c5e1c95c8bc23a36420",
"index": 1232,
"step-1": "<mask token>\n\n\ndef get_grads_correct(seed):\n util.set_seed(seed)\n theta_grads_correct = []\n phi_grads_correct = []\n log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n theta_grads_correct = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n phi_grads_correct = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_correct, phi_grads_correct\n\n\n<mask token>\n\n\ndef get_grads_in_one_no_zeroing(seed):\n util.set_seed(seed)\n theta_grads_in_one = []\n phi_grads_in_one = []\n log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef get_log_weight_and_log_q_weird_detach(generative_model,\n inference_network, obs, num_particles=1, reparam=False):\n \"\"\"Compute log weight and log prob of inference network.\n\n Args:\n generative_model: models.GenerativeModel object\n inference_network: models.InferenceNetwork object\n obs: tensor of shape [batch_size]\n num_particles: int\n reparam: reparameterize sampling from q (only applicable if z is\n Concrete)\n\n Returns:\n log_weight: tensor of shape [batch_size, num_particles]\n log_q: tensor of shape [batch_size, num_particles]\n \"\"\"\n latent_dist = inference_network.get_latent_dist(obs)\n latent = inference_network.sample_from_latent_dist(latent_dist,\n num_particles, reparam=reparam)\n log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)\n log_q = inference_network.get_log_prob_from_latent_dist(latent_dist, latent\n ).transpose(0, 1)\n log_weight = log_p - log_q.detach()\n return log_weight, log_q\n\n\ndef get_grads_weird_detach(seed):\n util.set_seed(seed)\n theta_grads_in_one = []\n phi_grads_in_one = []\n log_weight, log_q = get_log_weight_and_log_q_weird_detach(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef are_tensors_equal(xs, ys):\n return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_grads_correct(seed):\n util.set_seed(seed)\n theta_grads_correct = []\n phi_grads_correct = []\n log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n theta_grads_correct = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n phi_grads_correct = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_correct, phi_grads_correct\n\n\ndef get_grads_in_one(seed):\n util.set_seed(seed)\n theta_grads_in_one = []\n phi_grads_in_one = []\n log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n optimizer_phi.zero_grad()\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef get_grads_in_one_no_zeroing(seed):\n util.set_seed(seed)\n theta_grads_in_one = []\n phi_grads_in_one = []\n log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef get_log_weight_and_log_q_weird_detach(generative_model,\n inference_network, obs, num_particles=1, reparam=False):\n \"\"\"Compute log weight and log prob of inference network.\n\n Args:\n generative_model: models.GenerativeModel object\n inference_network: models.InferenceNetwork object\n obs: tensor of shape [batch_size]\n num_particles: int\n reparam: reparameterize sampling from q (only applicable if z is\n Concrete)\n\n Returns:\n log_weight: tensor of shape [batch_size, num_particles]\n log_q: tensor of shape [batch_size, num_particles]\n \"\"\"\n latent_dist = inference_network.get_latent_dist(obs)\n latent = inference_network.sample_from_latent_dist(latent_dist,\n num_particles, reparam=reparam)\n log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)\n log_q = inference_network.get_log_prob_from_latent_dist(latent_dist, latent\n ).transpose(0, 1)\n log_weight = log_p - log_q.detach()\n return log_weight, log_q\n\n\ndef get_grads_weird_detach(seed):\n util.set_seed(seed)\n theta_grads_in_one = []\n phi_grads_in_one = []\n log_weight, log_q = 
get_log_weight_and_log_q_weird_detach(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef are_tensors_equal(xs, ys):\n return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])\n\n\n<mask token>\n",
"step-3": "<mask token>\nargs = argparse.Namespace()\nargs.device = torch.device('cpu')\nargs.num_mixtures = 20\nargs.init_mixture_logits = np.ones(args.num_mixtures)\nargs.softmax_multiplier = 0.5\nargs.relaxed_one_hot = False\nargs.temperature = None\ntemp = np.arange(args.num_mixtures) + 5\ntrue_p_mixture_probs = temp / np.sum(temp)\nargs.true_mixture_logits = np.log(true_p_mixture_probs\n ) / args.softmax_multiplier\nargs.seed = 1\nutil.set_seed(args.seed)\ngenerative_model, inference_network, true_generative_model = util.init_models(\n args)\noptimizer_phi = torch.optim.Adam(inference_network.parameters())\noptimizer_theta = torch.optim.Adam(generative_model.parameters())\nbatch_size = 3\nnum_particles = 4\nobs = true_generative_model.sample_obs(batch_size)\n\n\ndef get_grads_correct(seed):\n util.set_seed(seed)\n theta_grads_correct = []\n phi_grads_correct = []\n log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n theta_grads_correct = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n phi_grads_correct = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_correct, phi_grads_correct\n\n\ndef get_grads_in_one(seed):\n util.set_seed(seed)\n theta_grads_in_one = []\n phi_grads_in_one = []\n log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n optimizer_phi.zero_grad()\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef get_grads_in_one_no_zeroing(seed):\n util.set_seed(seed)\n theta_grads_in_one = []\n phi_grads_in_one = []\n log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef get_log_weight_and_log_q_weird_detach(generative_model,\n inference_network, obs, num_particles=1, reparam=False):\n \"\"\"Compute log weight and log prob of inference network.\n\n Args:\n generative_model: models.GenerativeModel object\n inference_network: models.InferenceNetwork object\n obs: tensor of shape [batch_size]\n num_particles: int\n reparam: reparameterize 
sampling from q (only applicable if z is\n Concrete)\n\n Returns:\n log_weight: tensor of shape [batch_size, num_particles]\n log_q: tensor of shape [batch_size, num_particles]\n \"\"\"\n latent_dist = inference_network.get_latent_dist(obs)\n latent = inference_network.sample_from_latent_dist(latent_dist,\n num_particles, reparam=reparam)\n log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)\n log_q = inference_network.get_log_prob_from_latent_dist(latent_dist, latent\n ).transpose(0, 1)\n log_weight = log_p - log_q.detach()\n return log_weight, log_q\n\n\ndef get_grads_weird_detach(seed):\n util.set_seed(seed)\n theta_grads_in_one = []\n phi_grads_in_one = []\n log_weight, log_q = get_log_weight_and_log_q_weird_detach(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef are_tensors_equal(xs, ys):\n return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])\n\n\nseed = 1\ngrads_correct = sum(get_grads_correct(seed), [])\ngrads_in_one = sum(get_grads_in_one(seed), [])\ngrads_in_one_no_zeroing = sum(get_grads_in_one_no_zeroing(seed), [])\ngrads_weird_detach = sum(get_grads_weird_detach(seed), [])\nprint('Computing grads all at once is ok: {}'.format(are_tensors_equal(\n grads_correct, grads_in_one)))\nprint('Computing grads all at once and not zeroing phi grads is ok: {}'.\n format(are_tensors_equal(grads_correct, grads_in_one_no_zeroing)))\nprint('Computing grads with weird detach is ok: {}'.format(\n are_tensors_equal(grads_correct, grads_weird_detach)))\n",
"step-4": "import torch\nimport util\nimport numpy as np\nimport argparse\nimport losses\nargs = argparse.Namespace()\nargs.device = torch.device('cpu')\nargs.num_mixtures = 20\nargs.init_mixture_logits = np.ones(args.num_mixtures)\nargs.softmax_multiplier = 0.5\nargs.relaxed_one_hot = False\nargs.temperature = None\ntemp = np.arange(args.num_mixtures) + 5\ntrue_p_mixture_probs = temp / np.sum(temp)\nargs.true_mixture_logits = np.log(true_p_mixture_probs\n ) / args.softmax_multiplier\nargs.seed = 1\nutil.set_seed(args.seed)\ngenerative_model, inference_network, true_generative_model = util.init_models(\n args)\noptimizer_phi = torch.optim.Adam(inference_network.parameters())\noptimizer_theta = torch.optim.Adam(generative_model.parameters())\nbatch_size = 3\nnum_particles = 4\nobs = true_generative_model.sample_obs(batch_size)\n\n\ndef get_grads_correct(seed):\n util.set_seed(seed)\n theta_grads_correct = []\n phi_grads_correct = []\n log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n theta_grads_correct = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n phi_grads_correct = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_correct, phi_grads_correct\n\n\ndef get_grads_in_one(seed):\n util.set_seed(seed)\n theta_grads_in_one = []\n phi_grads_in_one = []\n log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n optimizer_phi.zero_grad()\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef get_grads_in_one_no_zeroing(seed):\n util.set_seed(seed)\n theta_grads_in_one = []\n phi_grads_in_one = []\n log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef get_log_weight_and_log_q_weird_detach(generative_model,\n inference_network, obs, num_particles=1, reparam=False):\n \"\"\"Compute log weight and log prob of inference network.\n\n Args:\n generative_model: models.GenerativeModel object\n inference_network: models.InferenceNetwork object\n obs: tensor of shape 
[batch_size]\n num_particles: int\n reparam: reparameterize sampling from q (only applicable if z is\n Concrete)\n\n Returns:\n log_weight: tensor of shape [batch_size, num_particles]\n log_q: tensor of shape [batch_size, num_particles]\n \"\"\"\n latent_dist = inference_network.get_latent_dist(obs)\n latent = inference_network.sample_from_latent_dist(latent_dist,\n num_particles, reparam=reparam)\n log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)\n log_q = inference_network.get_log_prob_from_latent_dist(latent_dist, latent\n ).transpose(0, 1)\n log_weight = log_p - log_q.detach()\n return log_weight, log_q\n\n\ndef get_grads_weird_detach(seed):\n util.set_seed(seed)\n theta_grads_in_one = []\n phi_grads_in_one = []\n log_weight, log_q = get_log_weight_and_log_q_weird_detach(generative_model,\n inference_network, obs, num_particles)\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef are_tensors_equal(xs, ys):\n return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])\n\n\nseed = 1\ngrads_correct = sum(get_grads_correct(seed), [])\ngrads_in_one = sum(get_grads_in_one(seed), [])\ngrads_in_one_no_zeroing = sum(get_grads_in_one_no_zeroing(seed), [])\ngrads_weird_detach = sum(get_grads_weird_detach(seed), [])\nprint('Computing grads all at once is ok: {}'.format(are_tensors_equal(\n grads_correct, grads_in_one)))\nprint('Computing grads all at once and not zeroing phi grads is ok: {}'.\n format(are_tensors_equal(grads_correct, grads_in_one_no_zeroing)))\nprint('Computing grads with weird detach is ok: {}'.format(\n are_tensors_equal(grads_correct, grads_weird_detach)))\n",
"step-5": "import torch\nimport util\nimport numpy as np\nimport argparse\nimport losses\n\n\nargs = argparse.Namespace()\nargs.device = torch.device('cpu')\nargs.num_mixtures = 20\nargs.init_mixture_logits = np.ones(args.num_mixtures)\nargs.softmax_multiplier = 0.5\nargs.relaxed_one_hot = False\nargs.temperature = None\ntemp = np.arange(args.num_mixtures) + 5\ntrue_p_mixture_probs = temp / np.sum(temp)\nargs.true_mixture_logits = \\\n np.log(true_p_mixture_probs) / args.softmax_multiplier\n\nargs.seed = 1\n# init models\nutil.set_seed(args.seed)\ngenerative_model, inference_network, true_generative_model = \\\n util.init_models(args)\n\noptimizer_phi = torch.optim.Adam(inference_network.parameters())\noptimizer_theta = torch.optim.Adam(generative_model.parameters())\nbatch_size = 3\nnum_particles = 4\nobs = true_generative_model.sample_obs(batch_size)\n\n\ndef get_grads_correct(seed):\n util.set_seed(seed)\n\n theta_grads_correct = []\n phi_grads_correct = []\n\n log_weight, log_q = losses.get_log_weight_and_log_q(\n generative_model, inference_network, obs, num_particles)\n\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n theta_grads_correct = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n # in rws, we step as we compute the grads\n # optimizer_theta.step()\n\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n phi_grads_correct = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n # in rws, we step as we compute the grads\n # optimizer_phi.step()\n return theta_grads_correct, phi_grads_correct\n\n\ndef get_grads_in_one(seed):\n util.set_seed(seed)\n\n theta_grads_in_one = []\n phi_grads_in_one = []\n\n log_weight, log_q = losses.get_log_weight_and_log_q(\n generative_model, inference_network, obs, num_particles)\n\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n\n optimizer_phi.zero_grad()\n # optimizer_theta.zero_grad()\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n\n # only get the grads in the end!\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n\n # in pyro, we want step to be in a different stage\n # optimizer_theta.step()\n # optimizer_phi.step()\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef get_grads_in_one_no_zeroing(seed):\n util.set_seed(seed)\n\n theta_grads_in_one = []\n phi_grads_in_one = []\n\n log_weight, log_q = losses.get_log_weight_and_log_q(\n generative_model, inference_network, obs, num_particles)\n\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n\n # optimizer_phi.zero_grad() -> don't zero phi grads\n # optimizer_theta.zero_grad()\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n\n # only get the grads in the end!\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n 
generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n\n # in pyro, we want step to be in a different stage\n # optimizer_theta.step()\n # optimizer_phi.step()\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef get_log_weight_and_log_q_weird_detach(generative_model, inference_network, obs,\n num_particles=1, reparam=False):\n \"\"\"Compute log weight and log prob of inference network.\n\n Args:\n generative_model: models.GenerativeModel object\n inference_network: models.InferenceNetwork object\n obs: tensor of shape [batch_size]\n num_particles: int\n reparam: reparameterize sampling from q (only applicable if z is\n Concrete)\n\n Returns:\n log_weight: tensor of shape [batch_size, num_particles]\n log_q: tensor of shape [batch_size, num_particles]\n \"\"\"\n\n latent_dist = inference_network.get_latent_dist(obs)\n latent = inference_network.sample_from_latent_dist(\n latent_dist, num_particles, reparam=reparam)\n log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)\n log_q = inference_network.get_log_prob_from_latent_dist(\n latent_dist, latent).transpose(0, 1)\n log_weight = log_p - log_q.detach()\n return log_weight, log_q\n\n\ndef get_grads_weird_detach(seed):\n util.set_seed(seed)\n\n theta_grads_in_one = []\n phi_grads_in_one = []\n\n log_weight, log_q = get_log_weight_and_log_q_weird_detach(\n generative_model, inference_network, obs, num_particles)\n\n optimizer_phi.zero_grad()\n optimizer_theta.zero_grad()\n wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(\n log_weight)\n wake_theta_loss.backward(retain_graph=True)\n\n # optimizer_phi.zero_grad() -> don't zero phi grads\n # optimizer_theta.zero_grad()\n wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(\n log_weight, log_q)\n wake_phi_loss.backward()\n\n # only get the grads in the end!\n theta_grads_in_one = [parameter.grad.clone() for parameter in\n generative_model.parameters()]\n phi_grads_in_one = [parameter.grad.clone() for parameter in\n inference_network.parameters()]\n\n # in pyro, we want step to be in a different stage\n # optimizer_theta.step()\n # optimizer_phi.step()\n return theta_grads_in_one, phi_grads_in_one\n\n\ndef are_tensors_equal(xs, ys):\n return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])\n\n\nseed = 1\ngrads_correct = sum(get_grads_correct(seed), [])\ngrads_in_one = sum(get_grads_in_one(seed), [])\ngrads_in_one_no_zeroing = sum(get_grads_in_one_no_zeroing(seed), [])\ngrads_weird_detach = sum(get_grads_weird_detach(seed), [])\n\n# is computing grads all in once ok?\nprint('Computing grads all at once is ok: {}'.format(\n are_tensors_equal(grads_correct, grads_in_one)))\nprint('Computing grads all at once and not zeroing phi grads is ok: {}'.format(\n are_tensors_equal(grads_correct, grads_in_one_no_zeroing)))\nprint('Computing grads with weird detach is ok: {}'.format(\n are_tensors_equal(grads_correct, grads_weird_detach)))\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
# -*- coding: utf-8 -*-
__author__ = 'virtual'
statuses = {
    None: {'name': 'None', },
    -1: {'name': 'unknown', },
    0: {'name': '', },
    1: {'name': 'Новый', },
    2: {'name': '', },
    3: {'name': 'Активный', },
    4: {'name': 'Приостановленный', },
    5: {'name': 'Заблокированный', },
    6: {'name': 'Удаленный', },
    7: {'name': 'Закрытый', },
    8: {'name': '', },
}
def get_status_name(status):
return '[%d]%s' % (status, statuses[status]['name'], )
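# Minimal usage sketch (assumption: the argument is one of the integer keys of
# `statuses`; an unknown key raises KeyError, and None would also break the
# %d format):
# print(get_status_name(1))  # -> '[1]Новый'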
|
normal
|
{
"blob_id": "a847fc32af2602db3b5545c15186c0209eb8ae8d",
"index": 4008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_status_name(status):\n return '[%d]%s' % (status, statuses[status]['name'])\n",
"step-3": "__author__ = 'virtual'\nstatuses = {None: {'name': 'None'}, (-1): {'name': 'unknown'}, (0): {'name':\n ''}, (1): {'name': 'Новый'}, (2): {'name': ''}, (3): {'name':\n 'Активный'}, (4): {'name': 'Приостановленный'}, (5): {'name':\n 'Заблокированный'}, (6): {'name': 'Удаленный'}, (7): {'name':\n 'Закрытый'}, (8): {'name': ''}}\n\n\ndef get_status_name(status):\n return '[%d]%s' % (status, statuses[status]['name'])\n",
"step-4": "# -*- coding: utf-8 -*-\n\n__author__ = 'virtual'\n\n\nstatuses = {\n None: {'name': 'None', },\n -1: { 'name': 'unknown', },\n 0: { 'name': '',},\n 1: { 'name': 'Новый',},\n 2: { 'name': '',},\n 3: { 'name': 'Активный', },\n 4: { 'name': 'Приостановленный',},\n 5: { 'name': 'Заблокированный', },\n 6: { 'name': 'Удаленный', },\n 7: { 'name': 'Закрытый', },\n 8: { 'name': '', },\n}\n\ndef get_status_name(status):\n return '[%d]%s' % (status, statuses[status]['name'], )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
####################################################################
# a COM client coded in Python: talk to MS-Word via its COM object
# model; uses either dynamic dispatch (run-time lookup/binding),
# or the static and faster type-library dispatch if makepy.py has
# been run; install the windows win32all extensions package to use
# this interface; Word runs hidden unless Visible is set to 1 (and
# Visible lets you watch, but impacts interactive Word sessions);
####################################################################
from sys import argv
docdir = 'C:\\temp\\'
if len(argv) == 2: docdir = argv[1] # ex: comclient.py a:\
from win32com.client import Dispatch # early or late binding
word = Dispatch('Word.Application') # connect/start word
word.Visible = 1 # else word runs hidden
# create and save new doc file
newdoc = word.Documents.Add() # call word methods
spot = newdoc.Range(0,0)
spot.InsertBefore('Hello COM client world!') # insert some text
newdoc.SaveAs(docdir + 'pycom.doc') # save in doc file
newdoc.SaveAs(docdir + 'copy.doc')
newdoc.Close()
# open and change a doc file
olddoc = word.Documents.Open(docdir + 'copy.doc')
finder = word.Selection.Find
finder.text = 'COM'
finder.Execute()
word.Selection.TypeText('Automation')
olddoc.Close()
# and so on: see Word's COM interface specs
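# Hedged follow-up (not in the original script): the faster type-library
# dispatch mentioned in the header comment can be requested explicitly, and
# Word can be shut down once the automation session is finished.
# from win32com.client import gencache
# word = gencache.EnsureDispatch('Word.Application')  # builds makepy wrapper if needed
# word.Quit()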
|
flexible
|
{
"blob_id": "df19aa720993c2385a6d025cf7ec8f3935ee4191",
"index": 9343,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(argv) == 2:\n docdir = argv[1]\n<mask token>\nspot.InsertBefore('Hello COM client world!')\nnewdoc.SaveAs(docdir + 'pycom.doc')\nnewdoc.SaveAs(docdir + 'copy.doc')\nnewdoc.Close()\n<mask token>\nfinder.Execute()\nword.Selection.TypeText('Automation')\nolddoc.Close()\n",
"step-3": "<mask token>\ndocdir = 'C:\\\\temp\\\\'\nif len(argv) == 2:\n docdir = argv[1]\n<mask token>\nword = Dispatch('Word.Application')\nword.Visible = 1\nnewdoc = word.Documents.Add()\nspot = newdoc.Range(0, 0)\nspot.InsertBefore('Hello COM client world!')\nnewdoc.SaveAs(docdir + 'pycom.doc')\nnewdoc.SaveAs(docdir + 'copy.doc')\nnewdoc.Close()\nolddoc = word.Documents.Open(docdir + 'copy.doc')\nfinder = word.Selection.Find\nfinder.text = 'COM'\nfinder.Execute()\nword.Selection.TypeText('Automation')\nolddoc.Close()\n",
"step-4": "from sys import argv\ndocdir = 'C:\\\\temp\\\\'\nif len(argv) == 2:\n docdir = argv[1]\nfrom win32com.client import Dispatch\nword = Dispatch('Word.Application')\nword.Visible = 1\nnewdoc = word.Documents.Add()\nspot = newdoc.Range(0, 0)\nspot.InsertBefore('Hello COM client world!')\nnewdoc.SaveAs(docdir + 'pycom.doc')\nnewdoc.SaveAs(docdir + 'copy.doc')\nnewdoc.Close()\nolddoc = word.Documents.Open(docdir + 'copy.doc')\nfinder = word.Selection.Find\nfinder.text = 'COM'\nfinder.Execute()\nword.Selection.TypeText('Automation')\nolddoc.Close()\n",
"step-5": "####################################################################\n# a COM client coded in Python: talk to MS-Word via its COM object\n# model; uses either dynamic dispatch (run-time lookup/binding), \n# or the static and faster type-library dispatch if makepy.py has \n# been run; install the windows win32all extensions package to use \n# this interface; Word runs hidden unless Visible is set to 1 (and\n# Visible lets you watch, but impacts interactive Word sessions);\n####################################################################\n\nfrom sys import argv\ndocdir = 'C:\\\\temp\\\\'\nif len(argv) == 2: docdir = argv[1] # ex: comclient.py a:\\\n\nfrom win32com.client import Dispatch # early or late binding\nword = Dispatch('Word.Application') # connect/start word\nword.Visible = 1 # else word runs hidden\n\n# create and save new doc file\nnewdoc = word.Documents.Add() # call word methods\nspot = newdoc.Range(0,0)\nspot.InsertBefore('Hello COM client world!') # insert some text\nnewdoc.SaveAs(docdir + 'pycom.doc') # save in doc file\nnewdoc.SaveAs(docdir + 'copy.doc') \nnewdoc.Close()\n\n# open and change a doc file\nolddoc = word.Documents.Open(docdir + 'copy.doc')\nfinder = word.Selection.Find\nfinder.text = 'COM'\nfinder.Execute()\nword.Selection.TypeText('Automation')\nolddoc.Close()\n\n# and so on: see Word's COM interface specs\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from bottle import response,request,route,run
from json import dumps
import ConfigParser
import pickle
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
import pickle
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
def fun(dat):
    # Lemmatize every comma-separated token of the Summary column and rejoin
    # each row into a single space-separated string.
    big=[]
    for i in dat['Summary']:
        ls=[]
        for j in i.split(','):
            ls.append(wordnet_lemmatizer.lemmatize(j))
        big.append(' '.join(ls))
    return big
#Initialization starts
#configParser=ConfigParser.RawConfigParser()
#configFilePath="Config.cfg"
#configParser.read(configFilePath)
#Host=configParser.get('file','host')
#Port=configParser.get('file','port')
#Config read ends
#This method trains a classifier on the data in trainData.csv and pickles the fitted pipeline and label encoder to disk
@route('/trainBot',method='POST')
def trainBot():
response.content_type='application/json'
data2=[]
print "training...."
data=pd.read_csv('trainData.csv',header=None)
import preprocess
from preprocess import number_removal,generate_word_frequency
import re
#print data
data.columns=['Intent','Summary']
data['Summary']=data.apply(number_removal,axis=1)
data['Summary'] = data.apply(generate_word_frequency,axis=1)
data['Summary']=fun(data)
from nltk.corpus import stopwords
stop = stopwords.words('english')
stop.extend(('.', ',', '"', "'", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}','/','-'))
for i in ['ask','alexa','allexa','tel','tell']:
stop.append(i)
le=LabelEncoder()
X=data['Summary'].fillna('')
y=data['Intent'].fillna('')
y=le.fit_transform(y)
classifier = Pipeline([
('vec',CountVectorizer(strip_accents='unicode',stop_words=stop)),
('tfidf', TfidfTransformer()),
('clf', RandomForestClassifier(n_estimators=10,random_state=0))])
classifier=classifier.fit(X, y)
f = open('random_forest_model.pickle', 'wb')
pickle.dump(classifier, f)
f.close()
f = open('label.pickle', 'wb')
pickle.dump(le, f)
f.close()
print "training completed"
item={"result":"training completed"}
data2.append(item)
return dumps(data2)
#This method classifies the input text based on the trained classifier
@route('/classify2',method='POST')
def classify2():
# read python dict back from the file
f = open('random_forest_model.pickle', 'rb')
classifier=pickle.load(f)
f.close()
f = open('label.pickle', 'rb')
label=pickle.load(f)
f.close()
response.content_type='application/json'
data=[]
inputText=request.json["input"]
print "input text : ",inputText
confidence=classifier.predict_proba([inputText])
index=np.argmax(confidence)
predicted_class=label.inverse_transform(classifier.predict([inputText]))
print str(round(confidence[0][index],2))+" "+ predicted_class[0]
item={"result":str(round(confidence[0][index],2))+" "+ predicted_class[0]}
data.append(item)
return dumps(data)
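#Hedged usage sketch (assumes the bottle app has been started, e.g. by
#enabling one of the commented-out run(...) calls at the bottom of this file):
# import requests
# r = requests.post('http://localhost:8000/classify2',
#                   json={'input': 'remitter bank wants to stop the payment'})
# print r.json()   # -> [{"result": "<confidence> <predicted intent>"}]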
#This method classifies the input text and falls back to 'others' when the confidence score is below 0.7 or the input is a single word
def classifyTextWithScore(inputText):
f = open('random_forest_model.pickle', 'rb')
classifier=pickle.load(f)
f.close()
f = open('label.pickle', 'rb')
label=pickle.load(f)
f.close()
confidence=classifier.predict_proba([inputText])
index=np.argmax(confidence)
predicted_class=label.inverse_transform(classifier.predict([inputText]))
print round(confidence[0][index],2),predicted_class
if (round(confidence[0][index],2)<0.7):
return "others"
elif(len(inputText.split(" "))<2):
return "others"
else:
return predicted_class[0]
#run(host='172.31.45.19', port=7500)
#print "hai"
print classifyTextWithScore("payments made last week where remitter bank wants to stop the payment")
#run(host='192.168.1.7',port=8000)
|
normal
|
{
"blob_id": "f0b5ad49fc47adc54fb16a151b4a0ed563f53a42",
"index": 9482,
"step-1": "from bottle import response,request,route,run\nfrom json import dumps\nimport ConfigParser\nimport pickle\nimport pandas as pd\nimport numpy as np\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.ensemble import RandomForestClassifier\nimport pickle\n\nfrom nltk.stem import WordNetLemmatizer\nwordnet_lemmatizer = WordNetLemmatizer()\ndef fun(dat):\n big=[]\n for i in dat['Summary']:\n st=''\n ls=[]\n for j in i.split(','):\n #print j\n ls.append(wordnet_lemmatizer.lemmatize(j))\n #print ls\n big.append(' '.join(ls))\n return big\n\n\n\n\n#Initialization starts\n#configParser=ConfigParser.RawConfigParser()\n#configFilePath=\"Config.cfg\"\n#configParser.read(configFilePath)\n#Host=configParser.get('file','host')\n#Port=configParser.get('file','port')\n\n#Config read ends\n\n\n#This method trains and creates a classifier from training data in csv file\n@route('/trainBot',method='POST')\ndef trainBot():\n response.content_type='application/json'\n data2=[]\n print \"training....\"\n data=pd.read_csv('trainData.csv',header=None)\n import preprocess\n from preprocess import number_removal,generate_word_frequency\n import re\n #print data\n data.columns=['Intent','Summary']\n \n data['Summary']=data.apply(number_removal,axis=1)\n data['Summary'] = data.apply(generate_word_frequency,axis=1)\n \n data['Summary']=fun(data)\n \n from nltk.corpus import stopwords\n stop = stopwords.words('english')\n stop.extend(('.', ',', '\"', \"'\", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}','/','-'))\n \n for i in ['ask','alexa','allexa','tel','tell']:\n stop.append(i)\n \n le=LabelEncoder()\n X=data['Summary'].fillna('')\n y=data['Intent'].fillna('')\n y=le.fit_transform(y)\n \n classifier = Pipeline([\n ('vec',CountVectorizer(strip_accents='unicode',stop_words=stop)),\n ('tfidf', TfidfTransformer()),\n ('clf', RandomForestClassifier(n_estimators=10,random_state=0))])\n \n classifier=classifier.fit(X, y)\n \n \n f = open('random_forest_model.pickle', 'wb')\n pickle.dump(classifier, f)\n f.close()\n \n f = open('label.pickle', 'wb')\n pickle.dump(le, f)\n f.close()\n \n print \"training completed\"\n item={\"result\":\"training completed\"}\n data2.append(item)\n return dumps(data2)\n\n\t\n#This method classifies the input text based on the trained classifier\n@route('/classify2',method='POST')\ndef classify2():\n # read python dict back from the file\n f = open('random_forest_model.pickle', 'rb')\n classifier=pickle.load(f)\n f.close()\n \n f = open('label.pickle', 'rb')\n label=pickle.load(f)\n f.close()\n response.content_type='application/json'\n data=[]\n inputText=request.json[\"input\"]\n print \"input text : \",inputText\n confidence=classifier.predict_proba([inputText])\n index=np.argmax(confidence)\n \n predicted_class=label.inverse_transform(classifier.predict([inputText]))\n \n print str(round(confidence[0][index],2))+\" \"+ predicted_class[0]\n \n item={\"result\":str(round(confidence[0][index],2))+\" \"+ predicted_class[0]}\n data.append(item)\n return dumps(data)\n\n\n#This method classifies and returns others based on confidence score\ndef classifyTextWithScore(inputText):\n f = open('random_forest_model.pickle', 'rb')\n classifier=pickle.load(f)\n f.close()\n \n f = open('label.pickle', 'rb')\n 
label=pickle.load(f)\n f.close()\n \n confidence=classifier.predict_proba([inputText])\n index=np.argmax(confidence)\n \n predicted_class=label.inverse_transform(classifier.predict([inputText]))\n \n \n print round(confidence[0][index],2),predicted_class\n if (round(confidence[0][index],2)<0.7):\n return \"others\"\n elif(len(inputText.split(\" \"))<2):\n\treturn \"others\"\n else:\n\treturn predicted_class[0]\n\t\n#run(host='172.31.45.19', port=7500)\n#print \"hai\"\nprint classifyTextWithScore(\"payments made last week where remitter bank wants to stop the payment\")\n\n\n#run(host='192.168.1.7',port=8000)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
class Blog(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE,)
class Post(models.Model):
blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)
user = models.ForeignKey(User,on_delete=models.CASCADE,)
header = models.CharField(max_length=50)
text = models.CharField(max_length=2048)
create_date = models.DateTimeField(auto_now=True)
@receiver(post_save, sender=Post)
def send_email(sender, **kwargs):
post = Post.objects.get(id=kwargs.get('instance').id)
template = loader.get_template('post2email.html')
subject = "Post in blog " + post.blog.user.username
context = { "header": post.header,
"text": post.text,
"id": post.id,
"host": getattr(settings, 'MY_DJANGO_URL_PATH', ''),
}
html_content = template.render(context)
msg = EmailMultiAlternatives(subject, "", "", [post.user.email])
msg.attach_alternative(html_content, "text/html")
msg.send()
class ReadPost(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE,)
post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)
class Subscription(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE,)
blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)
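# Hedged sketch (not part of the original module; `author` and `reader` stand
# for existing User rows): because send_email is wired to post_save, simply
# creating a Post triggers the notification e-mail.
# blog = Blog.objects.create(user=author)
# Post.objects.create(blog=blog, user=reader, header='Hello',
#                     text='First post')  # post_save fires send_email here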
|
flexible
|
{
"blob_id": "de77edaccdaada785f41828135ad2da4ae2b403e",
"index": 725,
"step-1": "<mask token>\n\n\nclass Post(models.Model):\n blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n header = models.CharField(max_length=50)\n text = models.CharField(max_length=2048)\n create_date = models.DateTimeField(auto_now=True)\n\n\n<mask token>\n\n\nclass ReadPost(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)\n\n\nclass Subscription(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)\n",
"step-2": "<mask token>\n\n\nclass Blog(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n\nclass Post(models.Model):\n blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n header = models.CharField(max_length=50)\n text = models.CharField(max_length=2048)\n create_date = models.DateTimeField(auto_now=True)\n\n\n<mask token>\n\n\nclass ReadPost(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)\n\n\nclass Subscription(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)\n",
"step-3": "<mask token>\n\n\nclass Blog(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n\nclass Post(models.Model):\n blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n header = models.CharField(max_length=50)\n text = models.CharField(max_length=2048)\n create_date = models.DateTimeField(auto_now=True)\n\n\n@receiver(post_save, sender=Post)\ndef send_email(sender, **kwargs):\n post = Post.objects.get(id=kwargs.get('instance').id)\n template = loader.get_template('post2email.html')\n subject = 'Post in blog ' + post.blog.user.username\n context = {'header': post.header, 'text': post.text, 'id': post.id,\n 'host': getattr(settings, 'MY_DJANGO_URL_PATH', '')}\n html_content = template.render(context)\n msg = EmailMultiAlternatives(subject, '', '', [post.user.email])\n msg.attach_alternative(html_content, 'text/html')\n msg.send()\n\n\nclass ReadPost(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)\n\n\nclass Subscription(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)\n",
"step-4": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template import loader\nfrom django.conf import settings\nfrom django.contrib.sites.shortcuts import get_current_site\n\n\nclass Blog(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n\nclass Post(models.Model):\n blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n header = models.CharField(max_length=50)\n text = models.CharField(max_length=2048)\n create_date = models.DateTimeField(auto_now=True)\n\n\n@receiver(post_save, sender=Post)\ndef send_email(sender, **kwargs):\n post = Post.objects.get(id=kwargs.get('instance').id)\n template = loader.get_template('post2email.html')\n subject = 'Post in blog ' + post.blog.user.username\n context = {'header': post.header, 'text': post.text, 'id': post.id,\n 'host': getattr(settings, 'MY_DJANGO_URL_PATH', '')}\n html_content = template.render(context)\n msg = EmailMultiAlternatives(subject, '', '', [post.user.email])\n msg.attach_alternative(html_content, 'text/html')\n msg.send()\n\n\nclass ReadPost(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)\n\n\nclass Subscription(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)\n",
"step-5": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template import loader\nfrom django.conf import settings\nfrom django.contrib.sites.shortcuts import get_current_site\n\nclass Blog(models.Model):\n user = models.ForeignKey(User,on_delete=models.CASCADE,)\n\nclass Post(models.Model):\n blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)\n user = models.ForeignKey(User,on_delete=models.CASCADE,)\n header = models.CharField(max_length=50)\n text = models.CharField(max_length=2048)\n create_date = models.DateTimeField(auto_now=True)\n\n@receiver(post_save, sender=Post)\ndef send_email(sender, **kwargs):\n post = Post.objects.get(id=kwargs.get('instance').id)\n template = loader.get_template('post2email.html')\n subject = \"Post in blog \" + post.blog.user.username\n context = { \"header\": post.header,\n \"text\": post.text,\n \"id\": post.id,\n \"host\": getattr(settings, 'MY_DJANGO_URL_PATH', ''),\n }\n html_content = template.render(context)\n msg = EmailMultiAlternatives(subject, \"\", \"\", [post.user.email])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send() \n \nclass ReadPost(models.Model):\n user = models.ForeignKey(User,on_delete=models.CASCADE,)\n post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)\n\nclass Subscription(models.Model):\n user = models.ForeignKey(User,on_delete=models.CASCADE,)\n blog = models.ForeignKey(Blog, on_delete=models.DO_NOTHING)",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
# -*- coding: utf-8 -*-
"""
Created on Thu May 24 18:18:36 2018
@author: Nicole
"""
from __future__ import division
import Rod
import matplotlib.pyplot as plt
import math
class Truss:
def __init__(self,node1,node2,size,result,ax):
self.node1=node1
self.node2=node2
self.rod=Rod.Rod(node1,node2,result)
self.size=size
self.result=result
self.ax=ax
self.length=math.sqrt((node1.x-node2.x)**2+(node1.y-node2.y)**2)
def PlotCalculatedTruss(self):
self.node1.PlotNode()
self.node1.PlotSupport()
self.node1.PlotForce()
self.node2.PlotNode()
self.node2.PlotSupport()
self.node2.PlotForce()
self.rod.PlotRod()
self.rod.PlotResult()
def PlotUncalculatedTruss(self):
self.node1.PlotNode()
self.node1.PlotSupport()
self.node1.PlotForce()
self.node2.PlotNode()
self.node2.PlotSupport()
self.node2.PlotForce()
self.rod.PlotRod()
def SaveTrussFig(self):
plt.savefig('truss.png',dpi=600)
plt.show()
'''
pud=UnitPostProcess(1.8,1.4,3.4,3.2,1,1,1,0,5,0,0,8,8.0,48.6667)
pud.setfig()
pud.plot()
pud=UnitPostProcess(3.4,3.2,7.4,3.2,0,0,1,1,0,0,0,0,8.0,23.3333)
pud.plot()
pud.savefig()
'''
|
normal
|
{
"blob_id": "f01a1b6d0de4ba685c489af2742159447f943d2d",
"index": 5605,
"step-1": "<mask token>\n\n\nclass Truss:\n\n def __init__(self, node1, node2, size, result, ax):\n self.node1 = node1\n self.node2 = node2\n self.rod = Rod.Rod(node1, node2, result)\n self.size = size\n self.result = result\n self.ax = ax\n self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2\n .y) ** 2)\n <mask token>\n <mask token>\n\n def SaveTrussFig(self):\n plt.savefig('truss.png', dpi=600)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Truss:\n\n def __init__(self, node1, node2, size, result, ax):\n self.node1 = node1\n self.node2 = node2\n self.rod = Rod.Rod(node1, node2, result)\n self.size = size\n self.result = result\n self.ax = ax\n self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2\n .y) ** 2)\n\n def PlotCalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n self.rod.PlotResult()\n <mask token>\n\n def SaveTrussFig(self):\n plt.savefig('truss.png', dpi=600)\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Truss:\n\n def __init__(self, node1, node2, size, result, ax):\n self.node1 = node1\n self.node2 = node2\n self.rod = Rod.Rod(node1, node2, result)\n self.size = size\n self.result = result\n self.ax = ax\n self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2\n .y) ** 2)\n\n def PlotCalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n self.rod.PlotResult()\n\n def PlotUncalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n\n def SaveTrussFig(self):\n plt.savefig('truss.png', dpi=600)\n plt.show()\n\n\n<mask token>\n",
"step-4": "<mask token>\nfrom __future__ import division\nimport Rod\nimport matplotlib.pyplot as plt\nimport math\n\n\nclass Truss:\n\n def __init__(self, node1, node2, size, result, ax):\n self.node1 = node1\n self.node2 = node2\n self.rod = Rod.Rod(node1, node2, result)\n self.size = size\n self.result = result\n self.ax = ax\n self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2\n .y) ** 2)\n\n def PlotCalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n self.rod.PlotResult()\n\n def PlotUncalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n\n def SaveTrussFig(self):\n plt.savefig('truss.png', dpi=600)\n plt.show()\n\n\n<mask token>\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 24 18:18:36 2018\n\n@author: Nicole\n\"\"\"\n\n\nfrom __future__ import division\nimport Rod\nimport matplotlib.pyplot as plt\nimport math\n\nclass Truss:\n def __init__(self,node1,node2,size,result,ax):\n self.node1=node1\n self.node2=node2\n self.rod=Rod.Rod(node1,node2,result)\n self.size=size\n self.result=result\n self.ax=ax\n self.length=math.sqrt((node1.x-node2.x)**2+(node1.y-node2.y)**2)\n def PlotCalculatedTruss(self): \n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n self.rod.PlotResult()\n def PlotUncalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n def SaveTrussFig(self):\n plt.savefig('truss.png',dpi=600)\n plt.show()\n\n'''\npud=UnitPostProcess(1.8,1.4,3.4,3.2,1,1,1,0,5,0,0,8,8.0,48.6667)\npud.setfig()\npud.plot()\npud=UnitPostProcess(3.4,3.2,7.4,3.2,0,0,1,1,0,0,0,0,8.0,23.3333)\npud.plot()\npud.savefig()\n'''",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class NSDescriptorsViewSet(viewsets.ModelViewSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_success_headers(self, data):
return {'Location': data['_links']['self']}
def list(self, request, *args, **kwargs):
if self.get_queryset().__len__() < 1:
raise APIException(detail=
'One or more individual NS descriptor resource have been created'
)
return super().list(request)
def update(self, request, *args, **kwargs):
instance = self.get_object()
if on_boarded != instance.nsdOnboardingState:
raise APIException(detail='NSD nsdOnboardingState is not {}'.
format(on_boarded))
if disabled != request.data['nsdOperationalState'
] and enabled != request.data['nsdOperationalState']:
raise APIException(detail=
'ValueError: invalid operationalState', code=status.
HTTP_409_CONFLICT)
response = request.data.copy()
set_request_parameter_to_string(request, 'userDefinedData')
super().update(request)
return Response(response, status=status.HTTP_200_OK)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if disabled != instance.nsdOperationalState:
raise APIException(detail='NSD nsdOperationalState is not {}'.
format(disabled), code=status.HTTP_409_CONFLICT)
if not_in_use != instance.nsdUsageState:
raise APIException(detail='NSD nsdUsageState is not {}'.format(
not_in_use), code=status.HTTP_409_CONFLICT)
remove_file('{}{}'.format(nsd_base_path, instance.id))
super().destroy(request)
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['PUT'], url_path='nsd_content')
def upload_content(self, request, **kwargs):
instance = self.get_object()
if created != instance.nsdOnboardingState:
raise APIException(detail='NSD nsdOnboardingState is not {}'.
format(created), code=status.HTTP_409_CONFLICT)
if 'application/zip' not in request.META['HTTP_ACCEPT']:
raise APIException(detail='HEAD need to have application/zip value'
)
network_service_path = decompress_zip(request.data['file'], '{}{}'.
format(nsd_base_path, instance.id) + '/nsd_content/')
network_service_descriptor = NetworkServiceDescriptor(path=
network_service_path)
nsd_content = network_service_descriptor.processing_data()
vnf_pkg_ids_list = list()
for vnfd in network_service_descriptor.get_constituent_vnfd():
vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(
vnfdId__iexact=vnfd['vnfd_id']).last().id))
nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)
serializer = self.get_serializer(instance, data=nsd_content)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_202_ACCEPTED)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NSDescriptorsViewSet(viewsets.ModelViewSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def create(self, request, *args, **kwargs):
set_request_parameter_to_string(request, 'userDefinedData')
request.data['_links'] = {'self': request.build_absolute_uri(),
'nsd_content': request.build_absolute_uri()}
return super().create(request)
def get_success_headers(self, data):
return {'Location': data['_links']['self']}
def list(self, request, *args, **kwargs):
if self.get_queryset().__len__() < 1:
raise APIException(detail=
'One or more individual NS descriptor resource have been created'
)
return super().list(request)
def update(self, request, *args, **kwargs):
instance = self.get_object()
if on_boarded != instance.nsdOnboardingState:
raise APIException(detail='NSD nsdOnboardingState is not {}'.
format(on_boarded))
if disabled != request.data['nsdOperationalState'
] and enabled != request.data['nsdOperationalState']:
raise APIException(detail=
'ValueError: invalid operationalState', code=status.
HTTP_409_CONFLICT)
response = request.data.copy()
set_request_parameter_to_string(request, 'userDefinedData')
super().update(request)
return Response(response, status=status.HTTP_200_OK)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if disabled != instance.nsdOperationalState:
raise APIException(detail='NSD nsdOperationalState is not {}'.
format(disabled), code=status.HTTP_409_CONFLICT)
if not_in_use != instance.nsdUsageState:
raise APIException(detail='NSD nsdUsageState is not {}'.format(
not_in_use), code=status.HTTP_409_CONFLICT)
remove_file('{}{}'.format(nsd_base_path, instance.id))
super().destroy(request)
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['PUT'], url_path='nsd_content')
def upload_content(self, request, **kwargs):
instance = self.get_object()
if created != instance.nsdOnboardingState:
raise APIException(detail='NSD nsdOnboardingState is not {}'.
format(created), code=status.HTTP_409_CONFLICT)
if 'application/zip' not in request.META['HTTP_ACCEPT']:
raise APIException(detail='HEAD need to have application/zip value'
)
network_service_path = decompress_zip(request.data['file'], '{}{}'.
format(nsd_base_path, instance.id) + '/nsd_content/')
network_service_descriptor = NetworkServiceDescriptor(path=
network_service_path)
nsd_content = network_service_descriptor.processing_data()
vnf_pkg_ids_list = list()
for vnfd in network_service_descriptor.get_constituent_vnfd():
vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(
vnfdId__iexact=vnfd['vnfd_id']).last().id))
nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)
serializer = self.get_serializer(instance, data=nsd_content)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_202_ACCEPTED)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NSDescriptorsViewSet(viewsets.ModelViewSet):
queryset = NsdInfo.objects.all()
serializer_class = NsdInfoSerializer
def create(self, request, *args, **kwargs):
set_request_parameter_to_string(request, 'userDefinedData')
request.data['_links'] = {'self': request.build_absolute_uri(),
'nsd_content': request.build_absolute_uri()}
return super().create(request)
def get_success_headers(self, data):
return {'Location': data['_links']['self']}
def list(self, request, *args, **kwargs):
if self.get_queryset().__len__() < 1:
raise APIException(detail=
'One or more individual NS descriptor resource have been created'
)
return super().list(request)
def update(self, request, *args, **kwargs):
instance = self.get_object()
if on_boarded != instance.nsdOnboardingState:
raise APIException(detail='NSD nsdOnboardingState is not {}'.
format(on_boarded))
if disabled != request.data['nsdOperationalState'
] and enabled != request.data['nsdOperationalState']:
raise APIException(detail=
'ValueError: invalid operationalState', code=status.
HTTP_409_CONFLICT)
response = request.data.copy()
set_request_parameter_to_string(request, 'userDefinedData')
super().update(request)
return Response(response, status=status.HTTP_200_OK)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if disabled != instance.nsdOperationalState:
raise APIException(detail='NSD nsdOperationalState is not {}'.
format(disabled), code=status.HTTP_409_CONFLICT)
if not_in_use != instance.nsdUsageState:
raise APIException(detail='NSD nsdUsageState is not {}'.format(
not_in_use), code=status.HTTP_409_CONFLICT)
remove_file('{}{}'.format(nsd_base_path, instance.id))
super().destroy(request)
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['PUT'], url_path='nsd_content')
def upload_content(self, request, **kwargs):
instance = self.get_object()
if created != instance.nsdOnboardingState:
raise APIException(detail='NSD nsdOnboardingState is not {}'.
format(created), code=status.HTTP_409_CONFLICT)
if 'application/zip' not in request.META['HTTP_ACCEPT']:
raise APIException(detail='HEAD need to have application/zip value'
)
network_service_path = decompress_zip(request.data['file'], '{}{}'.
format(nsd_base_path, instance.id) + '/nsd_content/')
network_service_descriptor = NetworkServiceDescriptor(path=
network_service_path)
nsd_content = network_service_descriptor.processing_data()
vnf_pkg_ids_list = list()
for vnfd in network_service_descriptor.get_constituent_vnfd():
vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(
vnfdId__iexact=vnfd['vnfd_id']).last().id))
nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)
serializer = self.get_serializer(instance, data=nsd_content)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_202_ACCEPTED)
<|reserved_special_token_1|>
from rest_framework import viewsets
from rest_framework.exceptions import APIException
from NSDManagement.serializers import *
from rest_framework.response import Response
from rest_framework import status
from rest_framework.utils import json
from rest_framework.decorators import action
from VnfPackageManagement.models import VnfPkgInfo
from utils.file_manipulation import remove_file, decompress_zip
from utils.format_tools import set_request_parameter_to_string
from utils.process_package.base_package import on_boarded, disabled, enabled, not_in_use, created
from utils.process_package.ns_descriptor import NetworkServiceDescriptor
class NSDescriptorsViewSet(viewsets.ModelViewSet):
queryset = NsdInfo.objects.all()
serializer_class = NsdInfoSerializer
def create(self, request, *args, **kwargs):
set_request_parameter_to_string(request, 'userDefinedData')
request.data['_links'] = {'self': request.build_absolute_uri(),
'nsd_content': request.build_absolute_uri()}
return super().create(request)
def get_success_headers(self, data):
return {'Location': data['_links']['self']}
def list(self, request, *args, **kwargs):
if self.get_queryset().__len__() < 1:
raise APIException(detail=
'One or more individual NS descriptor resource have been created'
)
return super().list(request)
def update(self, request, *args, **kwargs):
instance = self.get_object()
if on_boarded != instance.nsdOnboardingState:
raise APIException(detail='NSD nsdOnboardingState is not {}'.
format(on_boarded))
if disabled != request.data['nsdOperationalState'
] and enabled != request.data['nsdOperationalState']:
raise APIException(detail=
'ValueError: invalid operationalState', code=status.
HTTP_409_CONFLICT)
response = request.data.copy()
set_request_parameter_to_string(request, 'userDefinedData')
super().update(request)
return Response(response, status=status.HTTP_200_OK)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if disabled != instance.nsdOperationalState:
raise APIException(detail='NSD nsdOperationalState is not {}'.
format(disabled), code=status.HTTP_409_CONFLICT)
if not_in_use != instance.nsdUsageState:
raise APIException(detail='NSD nsdUsageState is not {}'.format(
not_in_use), code=status.HTTP_409_CONFLICT)
remove_file('{}{}'.format(nsd_base_path, instance.id))
super().destroy(request)
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['PUT'], url_path='nsd_content')
def upload_content(self, request, **kwargs):
instance = self.get_object()
if created != instance.nsdOnboardingState:
raise APIException(detail='NSD nsdOnboardingState is not {}'.
format(created), code=status.HTTP_409_CONFLICT)
if 'application/zip' not in request.META['HTTP_ACCEPT']:
raise APIException(detail='HEAD need to have application/zip value'
)
network_service_path = decompress_zip(request.data['file'], '{}{}'.
format(nsd_base_path, instance.id) + '/nsd_content/')
network_service_descriptor = NetworkServiceDescriptor(path=
network_service_path)
nsd_content = network_service_descriptor.processing_data()
vnf_pkg_ids_list = list()
for vnfd in network_service_descriptor.get_constituent_vnfd():
vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(
vnfdId__iexact=vnfd['vnfd_id']).last().id))
nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)
serializer = self.get_serializer(instance, data=nsd_content)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_202_ACCEPTED)
<|reserved_special_token_1|>
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rest_framework import viewsets
from rest_framework.exceptions import APIException
from NSDManagement.serializers import *
from rest_framework.response import Response
from rest_framework import status
from rest_framework.utils import json
from rest_framework.decorators import action
from VnfPackageManagement.models import VnfPkgInfo
from utils.file_manipulation import remove_file, decompress_zip
from utils.format_tools import set_request_parameter_to_string
from utils.process_package.base_package import on_boarded, disabled, enabled, not_in_use, created
from utils.process_package.ns_descriptor import NetworkServiceDescriptor
class NSDescriptorsViewSet(viewsets.ModelViewSet):
queryset = NsdInfo.objects.all()
serializer_class = NsdInfoSerializer
def create(self, request, *args, **kwargs):
set_request_parameter_to_string(request, 'userDefinedData')
request.data['_links'] = {'self': request.build_absolute_uri(),
'nsd_content': request.build_absolute_uri()}
return super().create(request)
def get_success_headers(self, data):
return {'Location': data['_links']['self']}
def list(self, request, *args, **kwargs):
if self.get_queryset().__len__() < 1:
raise APIException(detail='One or more individual NS descriptor resource have been created')
return super().list(request)
def update(self, request, *args, **kwargs):
instance = self.get_object()
if on_boarded != instance.nsdOnboardingState:
raise APIException(detail='NSD nsdOnboardingState is not {}'.format(on_boarded))
if disabled != request.data['nsdOperationalState'] and enabled != request.data['nsdOperationalState']:
raise APIException(detail='ValueError: invalid operationalState',
code=status.HTTP_409_CONFLICT)
response = request.data.copy()
set_request_parameter_to_string(request, 'userDefinedData')
super().update(request)
return Response(response, status=status.HTTP_200_OK)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if disabled != instance.nsdOperationalState:
raise APIException(detail='NSD nsdOperationalState is not {}'.format(disabled),
code=status.HTTP_409_CONFLICT)
if not_in_use != instance.nsdUsageState:
raise APIException(detail='NSD nsdUsageState is not {}'.format(not_in_use),
code=status.HTTP_409_CONFLICT)
remove_file('{}{}'.format(nsd_base_path, instance.id))
super().destroy(request)
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['PUT'], url_path='nsd_content')
def upload_content(self, request, **kwargs):
instance = self.get_object()
if created != instance.nsdOnboardingState:
raise APIException(detail='NSD nsdOnboardingState is not {}'.format(created),
code=status.HTTP_409_CONFLICT)
if 'application/zip' not in request.META['HTTP_ACCEPT']:
raise APIException(detail='HEAD need to have application/zip value')
network_service_path = decompress_zip(
request.data["file"], '{}{}'.format(nsd_base_path, instance.id) + '/nsd_content/')
network_service_descriptor = NetworkServiceDescriptor(path=network_service_path)
nsd_content = network_service_descriptor.processing_data()
vnf_pkg_ids_list = list()
for vnfd in network_service_descriptor.get_constituent_vnfd():
vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(vnfdId__iexact=vnfd['vnfd_id']).last().id))
nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)
serializer = self.get_serializer(instance, data=nsd_content)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_202_ACCEPTED)
|
flexible
|
{
"blob_id": "5e2fcc6379a8ecee0378d26108e4deab9d17dba6",
"index": 7483,
"step-1": "<mask token>\n\n\nclass NSDescriptorsViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n\n def get_success_headers(self, data):\n return {'Location': data['_links']['self']}\n\n def list(self, request, *args, **kwargs):\n if self.get_queryset().__len__() < 1:\n raise APIException(detail=\n 'One or more individual NS descriptor resource have been created'\n )\n return super().list(request)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n if on_boarded != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(on_boarded))\n if disabled != request.data['nsdOperationalState'\n ] and enabled != request.data['nsdOperationalState']:\n raise APIException(detail=\n 'ValueError: invalid operationalState', code=status.\n HTTP_409_CONFLICT)\n response = request.data.copy()\n set_request_parameter_to_string(request, 'userDefinedData')\n super().update(request)\n return Response(response, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if disabled != instance.nsdOperationalState:\n raise APIException(detail='NSD nsdOperationalState is not {}'.\n format(disabled), code=status.HTTP_409_CONFLICT)\n if not_in_use != instance.nsdUsageState:\n raise APIException(detail='NSD nsdUsageState is not {}'.format(\n not_in_use), code=status.HTTP_409_CONFLICT)\n remove_file('{}{}'.format(nsd_base_path, instance.id))\n super().destroy(request)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=['PUT'], url_path='nsd_content')\n def upload_content(self, request, **kwargs):\n instance = self.get_object()\n if created != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(created), code=status.HTTP_409_CONFLICT)\n if 'application/zip' not in request.META['HTTP_ACCEPT']:\n raise APIException(detail='HEAD need to have application/zip value'\n )\n network_service_path = decompress_zip(request.data['file'], '{}{}'.\n format(nsd_base_path, instance.id) + '/nsd_content/')\n network_service_descriptor = NetworkServiceDescriptor(path=\n network_service_path)\n nsd_content = network_service_descriptor.processing_data()\n vnf_pkg_ids_list = list()\n for vnfd in network_service_descriptor.get_constituent_vnfd():\n vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(\n vnfdId__iexact=vnfd['vnfd_id']).last().id))\n nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)\n serializer = self.get_serializer(instance, data=nsd_content)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(status=status.HTTP_202_ACCEPTED)\n",
"step-2": "<mask token>\n\n\nclass NSDescriptorsViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n\n def create(self, request, *args, **kwargs):\n set_request_parameter_to_string(request, 'userDefinedData')\n request.data['_links'] = {'self': request.build_absolute_uri(),\n 'nsd_content': request.build_absolute_uri()}\n return super().create(request)\n\n def get_success_headers(self, data):\n return {'Location': data['_links']['self']}\n\n def list(self, request, *args, **kwargs):\n if self.get_queryset().__len__() < 1:\n raise APIException(detail=\n 'One or more individual NS descriptor resource have been created'\n )\n return super().list(request)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n if on_boarded != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(on_boarded))\n if disabled != request.data['nsdOperationalState'\n ] and enabled != request.data['nsdOperationalState']:\n raise APIException(detail=\n 'ValueError: invalid operationalState', code=status.\n HTTP_409_CONFLICT)\n response = request.data.copy()\n set_request_parameter_to_string(request, 'userDefinedData')\n super().update(request)\n return Response(response, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if disabled != instance.nsdOperationalState:\n raise APIException(detail='NSD nsdOperationalState is not {}'.\n format(disabled), code=status.HTTP_409_CONFLICT)\n if not_in_use != instance.nsdUsageState:\n raise APIException(detail='NSD nsdUsageState is not {}'.format(\n not_in_use), code=status.HTTP_409_CONFLICT)\n remove_file('{}{}'.format(nsd_base_path, instance.id))\n super().destroy(request)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=['PUT'], url_path='nsd_content')\n def upload_content(self, request, **kwargs):\n instance = self.get_object()\n if created != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(created), code=status.HTTP_409_CONFLICT)\n if 'application/zip' not in request.META['HTTP_ACCEPT']:\n raise APIException(detail='HEAD need to have application/zip value'\n )\n network_service_path = decompress_zip(request.data['file'], '{}{}'.\n format(nsd_base_path, instance.id) + '/nsd_content/')\n network_service_descriptor = NetworkServiceDescriptor(path=\n network_service_path)\n nsd_content = network_service_descriptor.processing_data()\n vnf_pkg_ids_list = list()\n for vnfd in network_service_descriptor.get_constituent_vnfd():\n vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(\n vnfdId__iexact=vnfd['vnfd_id']).last().id))\n nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)\n serializer = self.get_serializer(instance, data=nsd_content)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(status=status.HTTP_202_ACCEPTED)\n",
"step-3": "<mask token>\n\n\nclass NSDescriptorsViewSet(viewsets.ModelViewSet):\n queryset = NsdInfo.objects.all()\n serializer_class = NsdInfoSerializer\n\n def create(self, request, *args, **kwargs):\n set_request_parameter_to_string(request, 'userDefinedData')\n request.data['_links'] = {'self': request.build_absolute_uri(),\n 'nsd_content': request.build_absolute_uri()}\n return super().create(request)\n\n def get_success_headers(self, data):\n return {'Location': data['_links']['self']}\n\n def list(self, request, *args, **kwargs):\n if self.get_queryset().__len__() < 1:\n raise APIException(detail=\n 'One or more individual NS descriptor resource have been created'\n )\n return super().list(request)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n if on_boarded != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(on_boarded))\n if disabled != request.data['nsdOperationalState'\n ] and enabled != request.data['nsdOperationalState']:\n raise APIException(detail=\n 'ValueError: invalid operationalState', code=status.\n HTTP_409_CONFLICT)\n response = request.data.copy()\n set_request_parameter_to_string(request, 'userDefinedData')\n super().update(request)\n return Response(response, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if disabled != instance.nsdOperationalState:\n raise APIException(detail='NSD nsdOperationalState is not {}'.\n format(disabled), code=status.HTTP_409_CONFLICT)\n if not_in_use != instance.nsdUsageState:\n raise APIException(detail='NSD nsdUsageState is not {}'.format(\n not_in_use), code=status.HTTP_409_CONFLICT)\n remove_file('{}{}'.format(nsd_base_path, instance.id))\n super().destroy(request)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=['PUT'], url_path='nsd_content')\n def upload_content(self, request, **kwargs):\n instance = self.get_object()\n if created != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(created), code=status.HTTP_409_CONFLICT)\n if 'application/zip' not in request.META['HTTP_ACCEPT']:\n raise APIException(detail='HEAD need to have application/zip value'\n )\n network_service_path = decompress_zip(request.data['file'], '{}{}'.\n format(nsd_base_path, instance.id) + '/nsd_content/')\n network_service_descriptor = NetworkServiceDescriptor(path=\n network_service_path)\n nsd_content = network_service_descriptor.processing_data()\n vnf_pkg_ids_list = list()\n for vnfd in network_service_descriptor.get_constituent_vnfd():\n vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(\n vnfdId__iexact=vnfd['vnfd_id']).last().id))\n nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)\n serializer = self.get_serializer(instance, data=nsd_content)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(status=status.HTTP_202_ACCEPTED)\n",
"step-4": "from rest_framework import viewsets\nfrom rest_framework.exceptions import APIException\nfrom NSDManagement.serializers import *\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.utils import json\nfrom rest_framework.decorators import action\nfrom VnfPackageManagement.models import VnfPkgInfo\nfrom utils.file_manipulation import remove_file, decompress_zip\nfrom utils.format_tools import set_request_parameter_to_string\nfrom utils.process_package.base_package import on_boarded, disabled, enabled, not_in_use, created\nfrom utils.process_package.ns_descriptor import NetworkServiceDescriptor\n\n\nclass NSDescriptorsViewSet(viewsets.ModelViewSet):\n queryset = NsdInfo.objects.all()\n serializer_class = NsdInfoSerializer\n\n def create(self, request, *args, **kwargs):\n set_request_parameter_to_string(request, 'userDefinedData')\n request.data['_links'] = {'self': request.build_absolute_uri(),\n 'nsd_content': request.build_absolute_uri()}\n return super().create(request)\n\n def get_success_headers(self, data):\n return {'Location': data['_links']['self']}\n\n def list(self, request, *args, **kwargs):\n if self.get_queryset().__len__() < 1:\n raise APIException(detail=\n 'One or more individual NS descriptor resource have been created'\n )\n return super().list(request)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n if on_boarded != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(on_boarded))\n if disabled != request.data['nsdOperationalState'\n ] and enabled != request.data['nsdOperationalState']:\n raise APIException(detail=\n 'ValueError: invalid operationalState', code=status.\n HTTP_409_CONFLICT)\n response = request.data.copy()\n set_request_parameter_to_string(request, 'userDefinedData')\n super().update(request)\n return Response(response, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if disabled != instance.nsdOperationalState:\n raise APIException(detail='NSD nsdOperationalState is not {}'.\n format(disabled), code=status.HTTP_409_CONFLICT)\n if not_in_use != instance.nsdUsageState:\n raise APIException(detail='NSD nsdUsageState is not {}'.format(\n not_in_use), code=status.HTTP_409_CONFLICT)\n remove_file('{}{}'.format(nsd_base_path, instance.id))\n super().destroy(request)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=['PUT'], url_path='nsd_content')\n def upload_content(self, request, **kwargs):\n instance = self.get_object()\n if created != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.\n format(created), code=status.HTTP_409_CONFLICT)\n if 'application/zip' not in request.META['HTTP_ACCEPT']:\n raise APIException(detail='HEAD need to have application/zip value'\n )\n network_service_path = decompress_zip(request.data['file'], '{}{}'.\n format(nsd_base_path, instance.id) + '/nsd_content/')\n network_service_descriptor = NetworkServiceDescriptor(path=\n network_service_path)\n nsd_content = network_service_descriptor.processing_data()\n vnf_pkg_ids_list = list()\n for vnfd in network_service_descriptor.get_constituent_vnfd():\n vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(\n vnfdId__iexact=vnfd['vnfd_id']).last().id))\n nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)\n serializer = self.get_serializer(instance, data=nsd_content)\n 
serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(status=status.HTTP_202_ACCEPTED)\n",
"step-5": "# All Rights Reserved.\n#\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom rest_framework import viewsets\nfrom rest_framework.exceptions import APIException\nfrom NSDManagement.serializers import *\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.utils import json\nfrom rest_framework.decorators import action\nfrom VnfPackageManagement.models import VnfPkgInfo\nfrom utils.file_manipulation import remove_file, decompress_zip\nfrom utils.format_tools import set_request_parameter_to_string\nfrom utils.process_package.base_package import on_boarded, disabled, enabled, not_in_use, created\nfrom utils.process_package.ns_descriptor import NetworkServiceDescriptor\n\n\nclass NSDescriptorsViewSet(viewsets.ModelViewSet):\n queryset = NsdInfo.objects.all()\n serializer_class = NsdInfoSerializer\n\n def create(self, request, *args, **kwargs):\n set_request_parameter_to_string(request, 'userDefinedData')\n request.data['_links'] = {'self': request.build_absolute_uri(),\n 'nsd_content': request.build_absolute_uri()}\n return super().create(request)\n\n def get_success_headers(self, data):\n return {'Location': data['_links']['self']}\n\n def list(self, request, *args, **kwargs):\n if self.get_queryset().__len__() < 1:\n raise APIException(detail='One or more individual NS descriptor resource have been created')\n\n return super().list(request)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n if on_boarded != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.format(on_boarded))\n\n if disabled != request.data['nsdOperationalState'] and enabled != request.data['nsdOperationalState']:\n raise APIException(detail='ValueError: invalid operationalState',\n code=status.HTTP_409_CONFLICT)\n\n response = request.data.copy()\n set_request_parameter_to_string(request, 'userDefinedData')\n\n super().update(request)\n return Response(response, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n if disabled != instance.nsdOperationalState:\n raise APIException(detail='NSD nsdOperationalState is not {}'.format(disabled),\n code=status.HTTP_409_CONFLICT)\n\n if not_in_use != instance.nsdUsageState:\n raise APIException(detail='NSD nsdUsageState is not {}'.format(not_in_use),\n code=status.HTTP_409_CONFLICT)\n\n remove_file('{}{}'.format(nsd_base_path, instance.id))\n super().destroy(request)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=['PUT'], url_path='nsd_content')\n def upload_content(self, request, **kwargs):\n instance = self.get_object()\n if created != instance.nsdOnboardingState:\n raise APIException(detail='NSD nsdOnboardingState is not {}'.format(created),\n code=status.HTTP_409_CONFLICT)\n\n if 'application/zip' not in request.META['HTTP_ACCEPT']:\n raise APIException(detail='HEAD need to have application/zip value')\n\n network_service_path = decompress_zip(\n 
request.data[\"file\"], '{}{}'.format(nsd_base_path, instance.id) + '/nsd_content/')\n network_service_descriptor = NetworkServiceDescriptor(path=network_service_path)\n nsd_content = network_service_descriptor.processing_data()\n vnf_pkg_ids_list = list()\n for vnfd in network_service_descriptor.get_constituent_vnfd():\n vnf_pkg_ids_list.append(str(VnfPkgInfo.objects.filter(vnfdId__iexact=vnfd['vnfd_id']).last().id))\n\n nsd_content['vnfPkgIds'] = json.dumps(vnf_pkg_ids_list)\n serializer = self.get_serializer(instance, data=nsd_content)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(status=status.HTTP_202_ACCEPTED)\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
__all__ = ['Swarmpose']
<|reserved_special_token_1|>
#a list of functions/Classes to be inported when a user imports * from swarmpose
__all__ = ['Swarmpose']
|
flexible
|
{
"blob_id": "e375501e6b815530e61af9181d4cade83d4588ca",
"index": 8762,
"step-1": "<mask token>\n",
"step-2": "__all__ = ['Swarmpose']\n",
"step-3": "#a list of functions/Classes to be inported when a user imports * from swarmpose\n__all__ = ['Swarmpose']",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def fun_nthfibonaccinumber(n):
n1 = 1
n2 = 1
if n == 0:
return n2
else:
for i in range(0, n - 1):
sum = n1 + n2
n1 = n2
n2 = sum
return n2
<|reserved_special_token_1|>
# Background: The Fibonacci numbers are defined by F(n) = F(n-1) + F(n-2).
# There are different conventions on whether 0 is a Fibonacci number,
# and whether counting starts at n=0 or at n=1. Here, we will assume that
# 0 is not a Fibonacci number, and that counting starts at n=0,
# so F(0)=F(1)=1, and F(2)=2. With this in mind, write the function
# nthfibonaccinumber(n) that takes a non-negative int n and returns the nth Fibonacci number.
def fun_nthfibonaccinumber(n):
n1 = 1
n2 = 1
if n == 0:
return n2
else:
for i in range(0,n-1):
sum = n1 + n2
n1 = n2
n2 = sum
return n2
|
flexible
|
{
"blob_id": "40744a8530df28f0bd8648900beb8a66e2d44cd0",
"index": 7730,
"step-1": "<mask token>\n",
"step-2": "def fun_nthfibonaccinumber(n):\n n1 = 1\n n2 = 1\n if n == 0:\n return n2\n else:\n for i in range(0, n - 1):\n sum = n1 + n2\n n1 = n2\n n2 = sum\n return n2\n",
"step-3": "# Background: The Fibonacci numbers are defined by F(n) = F(n-1) + F(n-2). \n# There are different conventions on whether 0 is a Fibonacci number, \n# and whether counting starts at n=0 or at n=1. Here, we will assume that \n# 0 is not a Fibonacci number, and that counting starts at n=0, \n# so F(0)=F(1)=1, and F(2)=2. With this in mind, write the function \n# nthfibonaccinumber(n) that takes a non-negative int n and returns the nth Fibonacci number.\n\n\n\ndef fun_nthfibonaccinumber(n):\n n1 = 1\n n2 = 1\n if n == 0:\n return n2\n else:\n for i in range(0,n-1):\n sum = n1 + n2\n n1 = n2\n n2 = sum\n return n2",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with webdriver.Chrome() as browser:
browser.get('http://suninjuly.github.io/selects1.html')
time.sleep(1)
x = int(browser.find_element_by_id('num1').text)
y = int(browser.find_element_by_id('num2').text)
sum_xy = str(int(x) + int(y))
browser.find_element_by_tag_name('select').click()
sum_opt = browser.find_element_by_css_selector("[value='{}']".format(
sum_xy))
sum_opt.click()
browser.find_element_by_tag_name('button').click()
time.sleep(5)
<|reserved_special_token_1|>
from selenium import webdriver
import time
with webdriver.Chrome() as browser:
browser.get('http://suninjuly.github.io/selects1.html')
time.sleep(1)
x = int(browser.find_element_by_id('num1').text)
y = int(browser.find_element_by_id('num2').text)
sum_xy = str(int(x) + int(y))
browser.find_element_by_tag_name('select').click()
sum_opt = browser.find_element_by_css_selector("[value='{}']".format(
sum_xy))
sum_opt.click()
browser.find_element_by_tag_name('button').click()
time.sleep(5)
<|reserved_special_token_1|>
from selenium import webdriver
import time
with webdriver.Chrome() as browser:
browser.get("http://suninjuly.github.io/selects1.html")
time.sleep(1)
x = int(browser.find_element_by_id("num1").text)
y = int(browser.find_element_by_id("num2").text)
sum_xy = str(int(x)+int(y))
browser.find_element_by_tag_name("select").click()
sum_opt = browser.find_element_by_css_selector("[value='{}']".format(sum_xy))
sum_opt.click()
browser.find_element_by_tag_name("button").click()
time.sleep(5)
|
flexible
|
{
"blob_id": "42be9077ec51a9be1d4923011a38cd64d829f876",
"index": 1529,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith webdriver.Chrome() as browser:\n browser.get('http://suninjuly.github.io/selects1.html')\n time.sleep(1)\n x = int(browser.find_element_by_id('num1').text)\n y = int(browser.find_element_by_id('num2').text)\n sum_xy = str(int(x) + int(y))\n browser.find_element_by_tag_name('select').click()\n sum_opt = browser.find_element_by_css_selector(\"[value='{}']\".format(\n sum_xy))\n sum_opt.click()\n browser.find_element_by_tag_name('button').click()\n time.sleep(5)\n",
"step-3": "from selenium import webdriver\nimport time\nwith webdriver.Chrome() as browser:\n browser.get('http://suninjuly.github.io/selects1.html')\n time.sleep(1)\n x = int(browser.find_element_by_id('num1').text)\n y = int(browser.find_element_by_id('num2').text)\n sum_xy = str(int(x) + int(y))\n browser.find_element_by_tag_name('select').click()\n sum_opt = browser.find_element_by_css_selector(\"[value='{}']\".format(\n sum_xy))\n sum_opt.click()\n browser.find_element_by_tag_name('button').click()\n time.sleep(5)\n",
"step-4": "from selenium import webdriver\nimport time\n\nwith webdriver.Chrome() as browser:\n browser.get(\"http://suninjuly.github.io/selects1.html\")\n time.sleep(1)\n x = int(browser.find_element_by_id(\"num1\").text)\n y = int(browser.find_element_by_id(\"num2\").text)\n sum_xy = str(int(x)+int(y))\n browser.find_element_by_tag_name(\"select\").click()\n sum_opt = browser.find_element_by_css_selector(\"[value='{}']\".format(sum_xy))\n sum_opt.click()\n browser.find_element_by_tag_name(\"button\").click()\n time.sleep(5)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#! /usr/bin/env python3
import sys
def stage_merge_checksums(
old_survey=None,
survey=None,
brickname=None,
**kwargs):
'''
For debugging / special-case processing, read previous checksums, and update them with
current checksums values, then write out the result.
'''
from collections import OrderedDict
cfn = old_survey.find_file('checksums', brick=brickname)
print('Old checksums:', cfn)
checksums = OrderedDict()
with open(cfn, 'r') as f:
for line in f.readlines():
words = line.split()
fn = words[1]
if fn.startswith('*'):
fn = fn[1:]
hashcode = words[0]
checksums[fn] = hashcode
# produce per-brick checksum file.
with survey.write_output('checksums', brick=brickname, hashsum=False) as out:
f = open(out.fn, 'w')
# Update hashsums
for fn,hashsum in survey.output_file_hashes.items():
print('Updating checksum', fn, '=', hashsum)
checksums[fn] = hashsum
# Write outputs
for fn,hashsum in checksums.items():
f.write('%s *%s\n' % (hashsum, fn))
f.close()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--old-output', required=True,
help='"Old" output directory to read old checksum file from.')
parser.add_argument('-b', '--brick', required=True,
help='Brick name to run')
parser.add_argument(
'-P', '--pickle', dest='pickle_pat',
help='Pickle filename pattern, default %(default)s',
default='pickles/runbrick-%(brick)s-%%(stage)s.pickle')
parser.add_argument('-n', '--no-write', dest='write', default=True,
action='store_false')
parser.add_argument('--survey-dir', type=str, default=None,
help='Override the $LEGACY_SURVEY_DIR environment variable')
parser.add_argument('-d', '--outdir', dest='output_dir',
help='Set output base directory, default "."')
opt = parser.parse_args()
optdict = vars(opt)
old_output_dir = optdict.pop('old_output')
from legacypipe.runbrick import get_runbrick_kwargs
survey, kwargs = get_runbrick_kwargs(**optdict)
if kwargs in [-1, 0]:
return kwargs
import logging
lvl = logging.INFO
logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
# tractor logging is *soooo* chatty
logging.getLogger('tractor.engine').setLevel(lvl + 10)
from legacypipe.survey import LegacySurveyData
old_survey = LegacySurveyData(survey_dir=old_output_dir,
output_dir=old_output_dir)
kwargs.update(old_survey=old_survey)
brickname = optdict['brick']
from astrometry.util.stages import CallGlobalTime, runstage
prereqs = {
'outliers': None,
}
prereqs.update({
'merge_checksums': 'outliers'
})
pickle_pat = optdict['pickle_pat']
pickle_pat = pickle_pat % dict(brick=brickname)
stagefunc = CallGlobalTime('stage_%s', globals())
stage = 'merge_checksums'
R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[stage],
write=[], **kwargs)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "a98d03b169b59704b3b592cee0b59f5389fd77b3",
"index": 8899,
"step-1": "<mask token>\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--old-output', required=True, help=\n '\"Old\" output directory to read old checksum file from.')\n parser.add_argument('-b', '--brick', required=True, help=\n 'Brick name to run')\n parser.add_argument('-P', '--pickle', dest='pickle_pat', help=\n 'Pickle filename pattern, default %(default)s', default=\n 'pickles/runbrick-%(brick)s-%%(stage)s.pickle')\n parser.add_argument('-n', '--no-write', dest='write', default=True,\n action='store_false')\n parser.add_argument('--survey-dir', type=str, default=None, help=\n 'Override the $LEGACY_SURVEY_DIR environment variable')\n parser.add_argument('-d', '--outdir', dest='output_dir', help=\n 'Set output base directory, default \".\"')\n opt = parser.parse_args()\n optdict = vars(opt)\n old_output_dir = optdict.pop('old_output')\n from legacypipe.runbrick import get_runbrick_kwargs\n survey, kwargs = get_runbrick_kwargs(**optdict)\n if kwargs in [-1, 0]:\n return kwargs\n import logging\n lvl = logging.INFO\n logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)\n logging.getLogger('tractor.engine').setLevel(lvl + 10)\n from legacypipe.survey import LegacySurveyData\n old_survey = LegacySurveyData(survey_dir=old_output_dir, output_dir=\n old_output_dir)\n kwargs.update(old_survey=old_survey)\n brickname = optdict['brick']\n from astrometry.util.stages import CallGlobalTime, runstage\n prereqs = {'outliers': None}\n prereqs.update({'merge_checksums': 'outliers'})\n pickle_pat = optdict['pickle_pat']\n pickle_pat = pickle_pat % dict(brick=brickname)\n stagefunc = CallGlobalTime('stage_%s', globals())\n stage = 'merge_checksums'\n R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[\n stage], write=[], **kwargs)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef stage_merge_checksums(old_survey=None, survey=None, brickname=None, **\n kwargs):\n \"\"\"\n For debugging / special-case processing, read previous checksums, and update them with\n current checksums values, then write out the result.\n \"\"\"\n from collections import OrderedDict\n cfn = old_survey.find_file('checksums', brick=brickname)\n print('Old checksums:', cfn)\n checksums = OrderedDict()\n with open(cfn, 'r') as f:\n for line in f.readlines():\n words = line.split()\n fn = words[1]\n if fn.startswith('*'):\n fn = fn[1:]\n hashcode = words[0]\n checksums[fn] = hashcode\n with survey.write_output('checksums', brick=brickname, hashsum=False\n ) as out:\n f = open(out.fn, 'w')\n for fn, hashsum in survey.output_file_hashes.items():\n print('Updating checksum', fn, '=', hashsum)\n checksums[fn] = hashsum\n for fn, hashsum in checksums.items():\n f.write('%s *%s\\n' % (hashsum, fn))\n f.close()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--old-output', required=True, help=\n '\"Old\" output directory to read old checksum file from.')\n parser.add_argument('-b', '--brick', required=True, help=\n 'Brick name to run')\n parser.add_argument('-P', '--pickle', dest='pickle_pat', help=\n 'Pickle filename pattern, default %(default)s', default=\n 'pickles/runbrick-%(brick)s-%%(stage)s.pickle')\n parser.add_argument('-n', '--no-write', dest='write', default=True,\n action='store_false')\n parser.add_argument('--survey-dir', type=str, default=None, help=\n 'Override the $LEGACY_SURVEY_DIR environment variable')\n parser.add_argument('-d', '--outdir', dest='output_dir', help=\n 'Set output base directory, default \".\"')\n opt = parser.parse_args()\n optdict = vars(opt)\n old_output_dir = optdict.pop('old_output')\n from legacypipe.runbrick import get_runbrick_kwargs\n survey, kwargs = get_runbrick_kwargs(**optdict)\n if kwargs in [-1, 0]:\n return kwargs\n import logging\n lvl = logging.INFO\n logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)\n logging.getLogger('tractor.engine').setLevel(lvl + 10)\n from legacypipe.survey import LegacySurveyData\n old_survey = LegacySurveyData(survey_dir=old_output_dir, output_dir=\n old_output_dir)\n kwargs.update(old_survey=old_survey)\n brickname = optdict['brick']\n from astrometry.util.stages import CallGlobalTime, runstage\n prereqs = {'outliers': None}\n prereqs.update({'merge_checksums': 'outliers'})\n pickle_pat = optdict['pickle_pat']\n pickle_pat = pickle_pat % dict(brick=brickname)\n stagefunc = CallGlobalTime('stage_%s', globals())\n stage = 'merge_checksums'\n R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[\n stage], write=[], **kwargs)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef stage_merge_checksums(old_survey=None, survey=None, brickname=None, **\n kwargs):\n \"\"\"\n For debugging / special-case processing, read previous checksums, and update them with\n current checksums values, then write out the result.\n \"\"\"\n from collections import OrderedDict\n cfn = old_survey.find_file('checksums', brick=brickname)\n print('Old checksums:', cfn)\n checksums = OrderedDict()\n with open(cfn, 'r') as f:\n for line in f.readlines():\n words = line.split()\n fn = words[1]\n if fn.startswith('*'):\n fn = fn[1:]\n hashcode = words[0]\n checksums[fn] = hashcode\n with survey.write_output('checksums', brick=brickname, hashsum=False\n ) as out:\n f = open(out.fn, 'w')\n for fn, hashsum in survey.output_file_hashes.items():\n print('Updating checksum', fn, '=', hashsum)\n checksums[fn] = hashsum\n for fn, hashsum in checksums.items():\n f.write('%s *%s\\n' % (hashsum, fn))\n f.close()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--old-output', required=True, help=\n '\"Old\" output directory to read old checksum file from.')\n parser.add_argument('-b', '--brick', required=True, help=\n 'Brick name to run')\n parser.add_argument('-P', '--pickle', dest='pickle_pat', help=\n 'Pickle filename pattern, default %(default)s', default=\n 'pickles/runbrick-%(brick)s-%%(stage)s.pickle')\n parser.add_argument('-n', '--no-write', dest='write', default=True,\n action='store_false')\n parser.add_argument('--survey-dir', type=str, default=None, help=\n 'Override the $LEGACY_SURVEY_DIR environment variable')\n parser.add_argument('-d', '--outdir', dest='output_dir', help=\n 'Set output base directory, default \".\"')\n opt = parser.parse_args()\n optdict = vars(opt)\n old_output_dir = optdict.pop('old_output')\n from legacypipe.runbrick import get_runbrick_kwargs\n survey, kwargs = get_runbrick_kwargs(**optdict)\n if kwargs in [-1, 0]:\n return kwargs\n import logging\n lvl = logging.INFO\n logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)\n logging.getLogger('tractor.engine').setLevel(lvl + 10)\n from legacypipe.survey import LegacySurveyData\n old_survey = LegacySurveyData(survey_dir=old_output_dir, output_dir=\n old_output_dir)\n kwargs.update(old_survey=old_survey)\n brickname = optdict['brick']\n from astrometry.util.stages import CallGlobalTime, runstage\n prereqs = {'outliers': None}\n prereqs.update({'merge_checksums': 'outliers'})\n pickle_pat = optdict['pickle_pat']\n pickle_pat = pickle_pat % dict(brick=brickname)\n stagefunc = CallGlobalTime('stage_%s', globals())\n stage = 'merge_checksums'\n R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[\n stage], write=[], **kwargs)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\n\n\ndef stage_merge_checksums(old_survey=None, survey=None, brickname=None, **\n kwargs):\n \"\"\"\n For debugging / special-case processing, read previous checksums, and update them with\n current checksums values, then write out the result.\n \"\"\"\n from collections import OrderedDict\n cfn = old_survey.find_file('checksums', brick=brickname)\n print('Old checksums:', cfn)\n checksums = OrderedDict()\n with open(cfn, 'r') as f:\n for line in f.readlines():\n words = line.split()\n fn = words[1]\n if fn.startswith('*'):\n fn = fn[1:]\n hashcode = words[0]\n checksums[fn] = hashcode\n with survey.write_output('checksums', brick=brickname, hashsum=False\n ) as out:\n f = open(out.fn, 'w')\n for fn, hashsum in survey.output_file_hashes.items():\n print('Updating checksum', fn, '=', hashsum)\n checksums[fn] = hashsum\n for fn, hashsum in checksums.items():\n f.write('%s *%s\\n' % (hashsum, fn))\n f.close()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--old-output', required=True, help=\n '\"Old\" output directory to read old checksum file from.')\n parser.add_argument('-b', '--brick', required=True, help=\n 'Brick name to run')\n parser.add_argument('-P', '--pickle', dest='pickle_pat', help=\n 'Pickle filename pattern, default %(default)s', default=\n 'pickles/runbrick-%(brick)s-%%(stage)s.pickle')\n parser.add_argument('-n', '--no-write', dest='write', default=True,\n action='store_false')\n parser.add_argument('--survey-dir', type=str, default=None, help=\n 'Override the $LEGACY_SURVEY_DIR environment variable')\n parser.add_argument('-d', '--outdir', dest='output_dir', help=\n 'Set output base directory, default \".\"')\n opt = parser.parse_args()\n optdict = vars(opt)\n old_output_dir = optdict.pop('old_output')\n from legacypipe.runbrick import get_runbrick_kwargs\n survey, kwargs = get_runbrick_kwargs(**optdict)\n if kwargs in [-1, 0]:\n return kwargs\n import logging\n lvl = logging.INFO\n logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)\n logging.getLogger('tractor.engine').setLevel(lvl + 10)\n from legacypipe.survey import LegacySurveyData\n old_survey = LegacySurveyData(survey_dir=old_output_dir, output_dir=\n old_output_dir)\n kwargs.update(old_survey=old_survey)\n brickname = optdict['brick']\n from astrometry.util.stages import CallGlobalTime, runstage\n prereqs = {'outliers': None}\n prereqs.update({'merge_checksums': 'outliers'})\n pickle_pat = optdict['pickle_pat']\n pickle_pat = pickle_pat % dict(brick=brickname)\n stagefunc = CallGlobalTime('stage_%s', globals())\n stage = 'merge_checksums'\n R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[\n stage], write=[], **kwargs)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#! /usr/bin/env python3\nimport sys\n\ndef stage_merge_checksums(\n old_survey=None,\n survey=None,\n brickname=None,\n **kwargs):\n '''\n For debugging / special-case processing, read previous checksums, and update them with\n current checksums values, then write out the result.\n '''\n from collections import OrderedDict\n\n cfn = old_survey.find_file('checksums', brick=brickname)\n print('Old checksums:', cfn)\n checksums = OrderedDict()\n with open(cfn, 'r') as f:\n for line in f.readlines():\n words = line.split()\n fn = words[1]\n if fn.startswith('*'):\n fn = fn[1:]\n hashcode = words[0]\n checksums[fn] = hashcode\n\n # produce per-brick checksum file.\n with survey.write_output('checksums', brick=brickname, hashsum=False) as out:\n f = open(out.fn, 'w')\n # Update hashsums\n for fn,hashsum in survey.output_file_hashes.items():\n print('Updating checksum', fn, '=', hashsum)\n checksums[fn] = hashsum\n # Write outputs\n for fn,hashsum in checksums.items():\n f.write('%s *%s\\n' % (hashsum, fn))\n f.close()\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--old-output', required=True,\n help='\"Old\" output directory to read old checksum file from.')\n\n parser.add_argument('-b', '--brick', required=True,\n help='Brick name to run')\n parser.add_argument(\n '-P', '--pickle', dest='pickle_pat',\n help='Pickle filename pattern, default %(default)s',\n default='pickles/runbrick-%(brick)s-%%(stage)s.pickle')\n parser.add_argument('-n', '--no-write', dest='write', default=True,\n action='store_false')\n parser.add_argument('--survey-dir', type=str, default=None,\n help='Override the $LEGACY_SURVEY_DIR environment variable')\n parser.add_argument('-d', '--outdir', dest='output_dir',\n help='Set output base directory, default \".\"')\n\n opt = parser.parse_args()\n optdict = vars(opt)\n\n old_output_dir = optdict.pop('old_output')\n\n from legacypipe.runbrick import get_runbrick_kwargs\n survey, kwargs = get_runbrick_kwargs(**optdict)\n if kwargs in [-1, 0]:\n return kwargs\n\n import logging\n lvl = logging.INFO\n logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)\n # tractor logging is *soooo* chatty\n logging.getLogger('tractor.engine').setLevel(lvl + 10)\n \n from legacypipe.survey import LegacySurveyData\n old_survey = LegacySurveyData(survey_dir=old_output_dir,\n output_dir=old_output_dir)\n kwargs.update(old_survey=old_survey)\n brickname = optdict['brick']\n\n from astrometry.util.stages import CallGlobalTime, runstage\n\n prereqs = {\n 'outliers': None,\n }\n prereqs.update({\n 'merge_checksums': 'outliers'\n })\n\n pickle_pat = optdict['pickle_pat']\n pickle_pat = pickle_pat % dict(brick=brickname)\n\n stagefunc = CallGlobalTime('stage_%s', globals())\n stage = 'merge_checksums'\n R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[stage],\n write=[], **kwargs)\n \nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from flask import render_template, request, redirect, url_for
from flask_login import current_user
from application import app, db, login_required
from application.auth.models import User
from application.memes.models import Meme
from application.comments.forms import CommentForm
# only a dummy new comment form
@app.route("/comments/new/")
@login_required(role="ANY")
def comments_form():
return render_template("comments/new.html", form = CommentForm())
|
normal
|
{
"blob_id": "fe1d47b63e88935f8b2eb4bac883f3028d6f560b",
"index": 4515,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/comments/new/')\n@login_required(role='ANY')\ndef comments_form():\n return render_template('comments/new.html', form=CommentForm())\n",
"step-3": "from flask import render_template, request, redirect, url_for\nfrom flask_login import current_user\nfrom application import app, db, login_required\nfrom application.auth.models import User\nfrom application.memes.models import Meme\nfrom application.comments.forms import CommentForm\n\n\[email protected]('/comments/new/')\n@login_required(role='ANY')\ndef comments_form():\n return render_template('comments/new.html', form=CommentForm())\n",
"step-4": "from flask import render_template, request, redirect, url_for\nfrom flask_login import current_user\n\nfrom application import app, db, login_required\nfrom application.auth.models import User\nfrom application.memes.models import Meme\nfrom application.comments.forms import CommentForm\n\n# only a dummy new comment form\n\[email protected](\"/comments/new/\")\n@login_required(role=\"ANY\")\ndef comments_form():\n return render_template(\"comments/new.html\", form = CommentForm())",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
v0 = 5
g = 9.81
t = 0.6
y = v0 * t - 0.5 * g * t ** 2
print(y)
|
normal
|
{
"blob_id": "378032a8d02bc49e5ed8ebccbeddfbb281c2cbd7",
"index": 6231,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(y)\n",
"step-3": "v0 = 5\ng = 9.81\nt = 0.6\ny = v0 * t - 0.5 * g * t ** 2\nprint(y)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class OfflineMetric:
def __init__(self, *args, **kwargs):
self.__name__ = self.name()
<|reserved_special_token_0|>
def handle_batch(self, model, x, labels, pred):
raise NotImplementedError()
def result(self):
raise NotImplementedError()
<|reserved_special_token_0|>
class SimilarityValidationMetric(OfflineMetric):
def __init__(self, margin, *args, id='sim', metric=['accuracy'], argmax
=None, **kwargs):
self._margin = np.array(margin)
assert argmax is None or self._margin.ndim == 1 and argmax in metric
self._metric = metric if isinstance(metric, list) else [metric]
self._argmax = argmax
self._scorer = None
self._id = id
super().__init__(self, *args, **kwargs)
def name(self):
metrics = list(map(lambda x: 'val_{}_{}'.format(self._id, x), self.
_metric))
if self._argmax is not None:
metrics.append('val_{}_argmax_{}'.format(self._id, self._argmax))
return tuple(metrics)
def handle_batch(self, model, x, labels, pred):
self._scorer.handle(labels, pred)
def result(self):
if self._argmax is None:
metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),
self._metric)
return tuple(metrics)
else:
argmax = safe_nanargmax(self._scorer.result(self._argmax))
metrics = map(lambda x: self._scorer.result(x)[argmax], self.
_metric)
return tuple(metrics) + (self._margin[argmax],)
class TripletValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id='triplet', **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = TripletBatchScorer(self._margin)
class ContrastiveValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id='contrastive', **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = ContrastiveBatchScorer(self._margin)
class FlatPairValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id='fpair', **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = FlatPairBatchScorer(self._margin)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OfflineMetric:
def __init__(self, *args, **kwargs):
self.__name__ = self.name()
def name(self):
raise NotImplementedError()
def handle_batch(self, model, x, labels, pred):
raise NotImplementedError()
def result(self):
raise NotImplementedError()
def reset(self):
pass
class SimilarityValidationMetric(OfflineMetric):
def __init__(self, margin, *args, id='sim', metric=['accuracy'], argmax
=None, **kwargs):
self._margin = np.array(margin)
assert argmax is None or self._margin.ndim == 1 and argmax in metric
self._metric = metric if isinstance(metric, list) else [metric]
self._argmax = argmax
self._scorer = None
self._id = id
super().__init__(self, *args, **kwargs)
def name(self):
metrics = list(map(lambda x: 'val_{}_{}'.format(self._id, x), self.
_metric))
if self._argmax is not None:
metrics.append('val_{}_argmax_{}'.format(self._id, self._argmax))
return tuple(metrics)
def handle_batch(self, model, x, labels, pred):
self._scorer.handle(labels, pred)
def result(self):
if self._argmax is None:
metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),
self._metric)
return tuple(metrics)
else:
argmax = safe_nanargmax(self._scorer.result(self._argmax))
metrics = map(lambda x: self._scorer.result(x)[argmax], self.
_metric)
return tuple(metrics) + (self._margin[argmax],)
class TripletValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id='triplet', **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = TripletBatchScorer(self._margin)
class ContrastiveValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id='contrastive', **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = ContrastiveBatchScorer(self._margin)
class FlatPairValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id='fpair', **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = FlatPairBatchScorer(self._margin)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ContrastiveBatchScorer(BatchScorer):
def __init__(self, margin, *args, **kwargs):
self._margin = margin
self._sess = tf.Session()
super().__init__(*args, **kwargs)
<|reserved_special_token_0|>
class TripletBatchScorer(ContrastiveBatchScorer):
def score(self, y_true, y_pred, metric):
graph = tf.Graph()
with tf.Session(graph=graph) as sess:
with graph.as_default():
return sess.run(triplet_score(tf.convert_to_tensor(y_true,
tf.float32), tf.convert_to_tensor(y_pred, tf.float32),
tf.convert_to_tensor(self._margin, tf.float32), metric=
metric))
class FlatPairBatchScorer(ContrastiveBatchScorer):
def score(self, y_true, y_pred, metric):
assert y_pred.shape[0] == y_true.shape[0] * 2
a, b = np.split(y_pred, 2)
dist = np.linalg.norm(a - b, axis=1)
return super().score(y_true, dist, metric)
class ContrastiveOnKerasMetric:
def __init__(self, margin, metric='accuracy'):
self.__name__ = 'contrastive_{}'.format(metric)
self._margin = margin
self._metric = metric
def __call__(self, labels, embeddings):
return contrastive_score(labels, embeddings, tf.convert_to_tensor(
self._margin), metric=self._metric)
class TripletOnKerasMetric:
def __init__(self, margin, metric='accuracy'):
self.__name__ = 'triplet_{}'.format(metric)
self._margin = margin
self._metric = metric
def __call__(self, labels, embeddings):
return triplet_score(labels, embeddings, tf.convert_to_tensor(self.
_margin), metric=self._metric)
class OfflineMetric:
def __init__(self, *args, **kwargs):
self.__name__ = self.name()
def name(self):
raise NotImplementedError()
def handle_batch(self, model, x, labels, pred):
raise NotImplementedError()
def result(self):
raise NotImplementedError()
def reset(self):
pass
class SimilarityValidationMetric(OfflineMetric):
def __init__(self, margin, *args, id='sim', metric=['accuracy'], argmax
=None, **kwargs):
self._margin = np.array(margin)
assert argmax is None or self._margin.ndim == 1 and argmax in metric
self._metric = metric if isinstance(metric, list) else [metric]
self._argmax = argmax
self._scorer = None
self._id = id
super().__init__(self, *args, **kwargs)
def name(self):
metrics = list(map(lambda x: 'val_{}_{}'.format(self._id, x), self.
_metric))
if self._argmax is not None:
metrics.append('val_{}_argmax_{}'.format(self._id, self._argmax))
return tuple(metrics)
def handle_batch(self, model, x, labels, pred):
self._scorer.handle(labels, pred)
def result(self):
if self._argmax is None:
metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),
self._metric)
return tuple(metrics)
else:
argmax = safe_nanargmax(self._scorer.result(self._argmax))
metrics = map(lambda x: self._scorer.result(x)[argmax], self.
_metric)
return tuple(metrics) + (self._margin[argmax],)
class TripletValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id='triplet', **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = TripletBatchScorer(self._margin)
class ContrastiveValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id='contrastive', **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = ContrastiveBatchScorer(self._margin)
class FlatPairValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id='fpair', **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = FlatPairBatchScorer(self._margin)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def pairwise_distances(embeddings, squared=False):
"""Compute the 2D matrix of distances between all the embeddings.
Args:
embeddings: tensor of shape (batch_size, embed_dim)
squared: Boolean. If true, output is the pairwise squared euclidean
distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: tensor of shape (batch_size, batch_size)
"""
dot_product = tf.matmul(embeddings, tf.transpose(embeddings))
square_norm = tf.diag_part(dot_product)
distances = tf.expand_dims(square_norm, 1
) - 2.0 * dot_product + tf.expand_dims(square_norm, 0)
distances = tf.maximum(distances, 0.0)
if not squared:
mask = tf.to_float(tf.equal(distances, 0.0))
distances = distances + mask * 1e-16
distances = tf.sqrt(distances)
distances = distances * (1.0 - mask)
return distances
<|reserved_special_token_0|>
class BatchScorer:
def __init__(self):
self._tp = 0
self._tn = 0
self._pcp = 0
self._pcn = 0
self._cp = 0
self._cn = 0
self._total = 0
def score(self, y_true, y_pred, metric):
raise NotImplementedError()
def handle(self, y_true, y_pred):
d = self.score(y_true, y_pred, ['tp', 'tn', 'pcp', 'pcn', 'cp',
'cn', 'total'])
self._tp += d['tp']
self._tn += d['tn']
self._pcp += d['pcp']
self._pcn += d['pcn']
self._cp += d['cp']
self._cn += d['cn']
self._total += d['total']
def result(self, metric):
with np.warnings.catch_warnings():
np.warnings.filterwarnings('ignore')
if metric == 'accuracy':
return (self._tp + self._tn) / self._total
if metric == 'precision':
return self._tp / self._pcp
if metric == 'recall':
return self._tp / self._cp
if metric == 'specificity':
return self._tn / self._cn
if metric == 'f1':
precision = self.result('precision')
recall = self.result('recall')
return 2 * precision * recall / (precision + recall)
if metric == 'bacc':
recall = self.result('recall')
specificity = self.result('specificity')
return (recall + specificity) / 2
raise NotImplementedError()
class ContrastiveBatchScorer(BatchScorer):
def __init__(self, margin, *args, **kwargs):
self._margin = margin
self._sess = tf.Session()
super().__init__(*args, **kwargs)
def score(self, y_true, y_pred, metric):
graph = tf.Graph()
with tf.Session(graph=graph) as sess:
with graph.as_default():
return sess.run(contrastive_score(tf.convert_to_tensor(
y_true, tf.float32), tf.convert_to_tensor(y_pred, tf.
float32), tf.convert_to_tensor(self._margin, tf.float32
), metric=metric))
class TripletBatchScorer(ContrastiveBatchScorer):
def score(self, y_true, y_pred, metric):
graph = tf.Graph()
with tf.Session(graph=graph) as sess:
with graph.as_default():
return sess.run(triplet_score(tf.convert_to_tensor(y_true,
tf.float32), tf.convert_to_tensor(y_pred, tf.float32),
tf.convert_to_tensor(self._margin, tf.float32), metric=
metric))
class FlatPairBatchScorer(ContrastiveBatchScorer):
def score(self, y_true, y_pred, metric):
assert y_pred.shape[0] == y_true.shape[0] * 2
a, b = np.split(y_pred, 2)
dist = np.linalg.norm(a - b, axis=1)
return super().score(y_true, dist, metric)
class ContrastiveOnKerasMetric:
def __init__(self, margin, metric='accuracy'):
self.__name__ = 'contrastive_{}'.format(metric)
self._margin = margin
self._metric = metric
def __call__(self, labels, embeddings):
return contrastive_score(labels, embeddings, tf.convert_to_tensor(
self._margin), metric=self._metric)
class TripletOnKerasMetric:
def __init__(self, margin, metric='accuracy'):
self.__name__ = 'triplet_{}'.format(metric)
self._margin = margin
self._metric = metric
def __call__(self, labels, embeddings):
return triplet_score(labels, embeddings, tf.convert_to_tensor(self.
_margin), metric=self._metric)
class OfflineMetric:
def __init__(self, *args, **kwargs):
self.__name__ = self.name()
def name(self):
raise NotImplementedError()
def handle_batch(self, model, x, labels, pred):
raise NotImplementedError()
def result(self):
raise NotImplementedError()
def reset(self):
pass
class SimilarityValidationMetric(OfflineMetric):
def __init__(self, margin, *args, id='sim', metric=['accuracy'], argmax
=None, **kwargs):
self._margin = np.array(margin)
assert argmax is None or self._margin.ndim == 1 and argmax in metric
self._metric = metric if isinstance(metric, list) else [metric]
self._argmax = argmax
self._scorer = None
self._id = id
super().__init__(self, *args, **kwargs)
def name(self):
metrics = list(map(lambda x: 'val_{}_{}'.format(self._id, x), self.
_metric))
if self._argmax is not None:
metrics.append('val_{}_argmax_{}'.format(self._id, self._argmax))
return tuple(metrics)
def handle_batch(self, model, x, labels, pred):
self._scorer.handle(labels, pred)
def result(self):
if self._argmax is None:
metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),
self._metric)
return tuple(metrics)
else:
argmax = safe_nanargmax(self._scorer.result(self._argmax))
metrics = map(lambda x: self._scorer.result(x)[argmax], self.
_metric)
return tuple(metrics) + (self._margin[argmax],)
class TripletValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id='triplet', **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = TripletBatchScorer(self._margin)
class ContrastiveValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id='contrastive', **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = ContrastiveBatchScorer(self._margin)
class FlatPairValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id='fpair', **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = FlatPairBatchScorer(self._margin)
<|reserved_special_token_1|>
import tensorflow as tf
import numpy as np
def safe_nanmax(x):
with np.warnings.catch_warnings():
np.warnings.filterwarnings('ignore',
r'All-NaN (slice|axis) encountered')
return np.nanmax(x)
def safe_nanargmax(x):
try:
return np.nanargmax(x)
except ValueError:
return np.nan
def upper_triangular_flat(A):
ones = tf.ones_like(A)
mask_a = tf.matrix_band_part(ones, 0, -1)
mask_b = tf.matrix_band_part(ones, 0, 0)
mask = tf.cast(mask_a - mask_b, dtype=tf.bool)
return tf.boolean_mask(A, mask)
def pairwise_distances(embeddings, squared=False):
"""Compute the 2D matrix of distances between all the embeddings.
Args:
embeddings: tensor of shape (batch_size, embed_dim)
squared: Boolean. If true, output is the pairwise squared euclidean
distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: tensor of shape (batch_size, batch_size)
"""
dot_product = tf.matmul(embeddings, tf.transpose(embeddings))
square_norm = tf.diag_part(dot_product)
# ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2
# shape (batch_size, batch_size)
distances = tf.expand_dims(square_norm, 1) - 2.0 * \
dot_product + tf.expand_dims(square_norm, 0)
distances = tf.maximum(distances, 0.0)
if not squared:
mask = tf.to_float(tf.equal(distances, 0.0))
distances = distances + mask * 1e-16
distances = tf.sqrt(distances)
distances = distances * (1.0 - mask)
return distances
def contrastive_score(labels, dist, thresholds, metric="accuracy"):
d = {}
if isinstance(metric, list):
for m in metric:
d[m] = True
else:
d[metric] = True
res = {}
if "total" in d:
res["total"] = tf.size(labels)
if "f1" in d:
precision = contrastive_score(
labels, dist, thresholds, metric="precision")
recall = contrastive_score(labels, dist, thresholds, metric="recall")
res["f1"] = 2 * precision * recall / (precision + recall)
if "bacc" in d:
specificity = contrastive_score(
labels, dist, thresholds, metric="specificity")
recall = contrastive_score(labels, dist, thresholds, metric="recall")
res["metric"] = (specificity + recall) / 2
th = tf.reshape(thresholds, [1, -1])
dist = tf.reshape(dist, [-1, 1])
labels = tf.cast(tf.reshape(labels, [-1, 1]), tf.int32)
pred = tf.cast(dist < th, tf.int32)
tp = pred * labels
tn = (1 - pred) * (1 - labels)
corr = tp + tn
tp = tf.reduce_sum(tf.cast(tp, tf.float32), axis=0)
tn = tf.reduce_sum(tf.cast(tn, tf.float32), axis=0)
pred = tf.cast(pred, tf.float32)
corr = tf.cast(corr, tf.float32)
labels = tf.cast(labels, tf.float32)
if "accuracy" in d:
res["accuracy"] = tf.reduce_mean(corr, axis=0)
if "precision" in d:
res["precision"] = tp / tf.reduce_sum(pred, axis=0)
if "recall" in d:
res["recall"] = tp / tf.reduce_sum(labels)
if "specificity" in d:
res["specificity"] = tn / tf.reduce_sum(1 - labels)
if "tp" in d:
res["tp"] = tp
if "tn" in d:
res["tn"] = tn
if "pcp" in d:
res["pcp"] = tf.reduce_sum(pred, axis=0)
if "pcn" in d:
res["pcn"] = tf.reduce_sum(1 - pred, axis=0)
if "cp" in d:
res["cp"] = tf.reduce_sum(labels)
if "cn" in d:
res["cn"] = tf.reduce_sum(1 - labels)
if len(d) != len(res):
raise NotImplementedError("some metrics were not implemented")
if not isinstance(metric, list):
return next(iter(res.values()))
return res
def triplet_score(labels, embeddings, thresholds, metric="accuracy"):
dist = pairwise_distances(embeddings)
labels = tf.reshape(labels, [-1, 1])
pair_labels = tf.cast(tf.equal(labels, tf.transpose(labels)), tf.int32)
flat_labels = upper_triangular_flat(pair_labels)
flat_dist = upper_triangular_flat(dist)
return contrastive_score(flat_labels, flat_dist, thresholds, metric=metric)
class BatchScorer:
def __init__(self):
self._tp = 0
self._tn = 0
self._pcp = 0
self._pcn = 0
self._cp = 0
self._cn = 0
self._total = 0
def score(self, y_true, y_pred, metric):
raise NotImplementedError()
def handle(self, y_true, y_pred):
d = self.score(y_true, y_pred,
["tp", "tn", "pcp", "pcn", "cp", "cn", "total"])
self._tp += d["tp"]
self._tn += d["tn"]
self._pcp += d["pcp"]
self._pcn += d["pcn"]
self._cp += d["cp"]
self._cn += d["cn"]
self._total += d["total"]
def result(self, metric):
with np.warnings.catch_warnings():
np.warnings.filterwarnings("ignore")
if metric == "accuracy":
return (self._tp + self._tn) / self._total
if metric == "precision":
return self._tp / self._pcp
if metric == "recall":
return self._tp / self._cp
if metric == "specificity":
return self._tn / self._cn
if metric == "f1":
precision = self.result("precision")
recall = self.result("recall")
return 2 * precision * recall / (precision + recall)
if metric == "bacc":
recall = self.result("recall")
specificity = self.result("specificity")
return (recall + specificity) / 2
raise NotImplementedError()
class ContrastiveBatchScorer(BatchScorer):
def __init__(self, margin, *args, **kwargs):
self._margin = margin
self._sess = tf.Session()
super().__init__(*args, **kwargs)
def score(self, y_true, y_pred, metric):
graph = tf.Graph()
with tf.Session(graph=graph) as sess:
with graph.as_default():
return sess.run(
contrastive_score(
tf.convert_to_tensor(y_true, tf.float32),
tf.convert_to_tensor(y_pred, tf.float32),
tf.convert_to_tensor(self._margin, tf.float32),
metric=metric))
class TripletBatchScorer(ContrastiveBatchScorer):
def score(self, y_true, y_pred, metric):
graph = tf.Graph()
with tf.Session(graph=graph) as sess:
with graph.as_default():
return sess.run(
triplet_score(
tf.convert_to_tensor(y_true, tf.float32),
tf.convert_to_tensor(y_pred, tf.float32),
tf.convert_to_tensor(self._margin, tf.float32),
metric=metric))
class FlatPairBatchScorer(ContrastiveBatchScorer):
def score(self, y_true, y_pred, metric):
assert y_pred.shape[0] == y_true.shape[0] * 2
a, b = np.split(y_pred, 2)
dist = np.linalg.norm(a - b, axis=1)
return super().score(y_true, dist, metric)
class ContrastiveOnKerasMetric:
def __init__(self, margin, metric="accuracy"):
self.__name__ = "contrastive_{}".format(metric)
self._margin = margin
self._metric = metric
def __call__(self, labels, embeddings):
return contrastive_score(
labels,
embeddings,
tf.convert_to_tensor(self._margin),
metric=self._metric)
class TripletOnKerasMetric:
def __init__(self, margin, metric="accuracy"):
self.__name__ = "triplet_{}".format(metric)
self._margin = margin
self._metric = metric
def __call__(self, labels, embeddings):
return triplet_score(
labels,
embeddings,
tf.convert_to_tensor(self._margin),
metric=self._metric)
class OfflineMetric:
def __init__(self, *args, **kwargs):
self.__name__ = self.name()
def name(self):
raise NotImplementedError()
def handle_batch(self, model, x, labels, pred):
raise NotImplementedError()
def result(self):
raise NotImplementedError()
def reset(self):
pass
class SimilarityValidationMetric(OfflineMetric):
def __init__(self,
margin,
*args,
id="sim",
metric=["accuracy"],
argmax=None,
**kwargs):
self._margin = np.array(margin)
assert argmax is None or (self._margin.ndim == 1 and argmax in metric)
self._metric = metric if isinstance(metric, list) else [metric]
self._argmax = argmax
self._scorer = None
self._id = id
super().__init__(self, *args, **kwargs)
def name(self):
metrics = list(
map(lambda x: "val_{}_{}".format(self._id, x), self._metric))
if self._argmax is not None:
metrics.append("val_{}_argmax_{}".format(self._id, self._argmax))
return tuple(metrics)
def handle_batch(self, model, x, labels, pred):
self._scorer.handle(labels, pred)
def result(self):
if self._argmax is None:
metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),
self._metric)
return tuple(metrics)
else:
argmax = safe_nanargmax(self._scorer.result(self._argmax))
metrics = map(lambda x: self._scorer.result(x)[argmax],
self._metric)
return tuple(metrics) + (self._margin[argmax], )
class TripletValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id="triplet", **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = TripletBatchScorer(self._margin)
class ContrastiveValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id="contrastive", **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = ContrastiveBatchScorer(self._margin)
class FlatPairValidationMetric(SimilarityValidationMetric):
def __init__(self, *args, id="fpair", **kwargs):
super().__init__(*args, id=id, **kwargs)
def reset(self):
self._scorer = FlatPairBatchScorer(self._margin)
|
flexible
|
{
"blob_id": "16bf4583b872f038edccbac4e567c1854d65e216",
"index": 4962,
"step-1": "<mask token>\n\n\nclass OfflineMetric:\n\n def __init__(self, *args, **kwargs):\n self.__name__ = self.name()\n <mask token>\n\n def handle_batch(self, model, x, labels, pred):\n raise NotImplementedError()\n\n def result(self):\n raise NotImplementedError()\n <mask token>\n\n\nclass SimilarityValidationMetric(OfflineMetric):\n\n def __init__(self, margin, *args, id='sim', metric=['accuracy'], argmax\n =None, **kwargs):\n self._margin = np.array(margin)\n assert argmax is None or self._margin.ndim == 1 and argmax in metric\n self._metric = metric if isinstance(metric, list) else [metric]\n self._argmax = argmax\n self._scorer = None\n self._id = id\n super().__init__(self, *args, **kwargs)\n\n def name(self):\n metrics = list(map(lambda x: 'val_{}_{}'.format(self._id, x), self.\n _metric))\n if self._argmax is not None:\n metrics.append('val_{}_argmax_{}'.format(self._id, self._argmax))\n return tuple(metrics)\n\n def handle_batch(self, model, x, labels, pred):\n self._scorer.handle(labels, pred)\n\n def result(self):\n if self._argmax is None:\n metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),\n self._metric)\n return tuple(metrics)\n else:\n argmax = safe_nanargmax(self._scorer.result(self._argmax))\n metrics = map(lambda x: self._scorer.result(x)[argmax], self.\n _metric)\n return tuple(metrics) + (self._margin[argmax],)\n\n\nclass TripletValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='triplet', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = TripletBatchScorer(self._margin)\n\n\nclass ContrastiveValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='contrastive', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = ContrastiveBatchScorer(self._margin)\n\n\nclass FlatPairValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='fpair', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = FlatPairBatchScorer(self._margin)\n",
"step-2": "<mask token>\n\n\nclass OfflineMetric:\n\n def __init__(self, *args, **kwargs):\n self.__name__ = self.name()\n\n def name(self):\n raise NotImplementedError()\n\n def handle_batch(self, model, x, labels, pred):\n raise NotImplementedError()\n\n def result(self):\n raise NotImplementedError()\n\n def reset(self):\n pass\n\n\nclass SimilarityValidationMetric(OfflineMetric):\n\n def __init__(self, margin, *args, id='sim', metric=['accuracy'], argmax\n =None, **kwargs):\n self._margin = np.array(margin)\n assert argmax is None or self._margin.ndim == 1 and argmax in metric\n self._metric = metric if isinstance(metric, list) else [metric]\n self._argmax = argmax\n self._scorer = None\n self._id = id\n super().__init__(self, *args, **kwargs)\n\n def name(self):\n metrics = list(map(lambda x: 'val_{}_{}'.format(self._id, x), self.\n _metric))\n if self._argmax is not None:\n metrics.append('val_{}_argmax_{}'.format(self._id, self._argmax))\n return tuple(metrics)\n\n def handle_batch(self, model, x, labels, pred):\n self._scorer.handle(labels, pred)\n\n def result(self):\n if self._argmax is None:\n metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),\n self._metric)\n return tuple(metrics)\n else:\n argmax = safe_nanargmax(self._scorer.result(self._argmax))\n metrics = map(lambda x: self._scorer.result(x)[argmax], self.\n _metric)\n return tuple(metrics) + (self._margin[argmax],)\n\n\nclass TripletValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='triplet', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = TripletBatchScorer(self._margin)\n\n\nclass ContrastiveValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='contrastive', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = ContrastiveBatchScorer(self._margin)\n\n\nclass FlatPairValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='fpair', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = FlatPairBatchScorer(self._margin)\n",
"step-3": "<mask token>\n\n\nclass ContrastiveBatchScorer(BatchScorer):\n\n def __init__(self, margin, *args, **kwargs):\n self._margin = margin\n self._sess = tf.Session()\n super().__init__(*args, **kwargs)\n <mask token>\n\n\nclass TripletBatchScorer(ContrastiveBatchScorer):\n\n def score(self, y_true, y_pred, metric):\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n with graph.as_default():\n return sess.run(triplet_score(tf.convert_to_tensor(y_true,\n tf.float32), tf.convert_to_tensor(y_pred, tf.float32),\n tf.convert_to_tensor(self._margin, tf.float32), metric=\n metric))\n\n\nclass FlatPairBatchScorer(ContrastiveBatchScorer):\n\n def score(self, y_true, y_pred, metric):\n assert y_pred.shape[0] == y_true.shape[0] * 2\n a, b = np.split(y_pred, 2)\n dist = np.linalg.norm(a - b, axis=1)\n return super().score(y_true, dist, metric)\n\n\nclass ContrastiveOnKerasMetric:\n\n def __init__(self, margin, metric='accuracy'):\n self.__name__ = 'contrastive_{}'.format(metric)\n self._margin = margin\n self._metric = metric\n\n def __call__(self, labels, embeddings):\n return contrastive_score(labels, embeddings, tf.convert_to_tensor(\n self._margin), metric=self._metric)\n\n\nclass TripletOnKerasMetric:\n\n def __init__(self, margin, metric='accuracy'):\n self.__name__ = 'triplet_{}'.format(metric)\n self._margin = margin\n self._metric = metric\n\n def __call__(self, labels, embeddings):\n return triplet_score(labels, embeddings, tf.convert_to_tensor(self.\n _margin), metric=self._metric)\n\n\nclass OfflineMetric:\n\n def __init__(self, *args, **kwargs):\n self.__name__ = self.name()\n\n def name(self):\n raise NotImplementedError()\n\n def handle_batch(self, model, x, labels, pred):\n raise NotImplementedError()\n\n def result(self):\n raise NotImplementedError()\n\n def reset(self):\n pass\n\n\nclass SimilarityValidationMetric(OfflineMetric):\n\n def __init__(self, margin, *args, id='sim', metric=['accuracy'], argmax\n =None, **kwargs):\n self._margin = np.array(margin)\n assert argmax is None or self._margin.ndim == 1 and argmax in metric\n self._metric = metric if isinstance(metric, list) else [metric]\n self._argmax = argmax\n self._scorer = None\n self._id = id\n super().__init__(self, *args, **kwargs)\n\n def name(self):\n metrics = list(map(lambda x: 'val_{}_{}'.format(self._id, x), self.\n _metric))\n if self._argmax is not None:\n metrics.append('val_{}_argmax_{}'.format(self._id, self._argmax))\n return tuple(metrics)\n\n def handle_batch(self, model, x, labels, pred):\n self._scorer.handle(labels, pred)\n\n def result(self):\n if self._argmax is None:\n metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),\n self._metric)\n return tuple(metrics)\n else:\n argmax = safe_nanargmax(self._scorer.result(self._argmax))\n metrics = map(lambda x: self._scorer.result(x)[argmax], self.\n _metric)\n return tuple(metrics) + (self._margin[argmax],)\n\n\nclass TripletValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='triplet', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = TripletBatchScorer(self._margin)\n\n\nclass ContrastiveValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='contrastive', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = ContrastiveBatchScorer(self._margin)\n\n\nclass FlatPairValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='fpair', **kwargs):\n super().__init__(*args, id=id, 
**kwargs)\n\n def reset(self):\n self._scorer = FlatPairBatchScorer(self._margin)\n",
"step-4": "<mask token>\n\n\ndef pairwise_distances(embeddings, squared=False):\n \"\"\"Compute the 2D matrix of distances between all the embeddings.\n Args:\n embeddings: tensor of shape (batch_size, embed_dim)\n squared: Boolean. If true, output is the pairwise squared euclidean \n distance matrix. \n If false, output is the pairwise euclidean distance matrix.\n Returns:\n pairwise_distances: tensor of shape (batch_size, batch_size)\n \"\"\"\n dot_product = tf.matmul(embeddings, tf.transpose(embeddings))\n square_norm = tf.diag_part(dot_product)\n distances = tf.expand_dims(square_norm, 1\n ) - 2.0 * dot_product + tf.expand_dims(square_norm, 0)\n distances = tf.maximum(distances, 0.0)\n if not squared:\n mask = tf.to_float(tf.equal(distances, 0.0))\n distances = distances + mask * 1e-16\n distances = tf.sqrt(distances)\n distances = distances * (1.0 - mask)\n return distances\n\n\n<mask token>\n\n\nclass BatchScorer:\n\n def __init__(self):\n self._tp = 0\n self._tn = 0\n self._pcp = 0\n self._pcn = 0\n self._cp = 0\n self._cn = 0\n self._total = 0\n\n def score(self, y_true, y_pred, metric):\n raise NotImplementedError()\n\n def handle(self, y_true, y_pred):\n d = self.score(y_true, y_pred, ['tp', 'tn', 'pcp', 'pcn', 'cp',\n 'cn', 'total'])\n self._tp += d['tp']\n self._tn += d['tn']\n self._pcp += d['pcp']\n self._pcn += d['pcn']\n self._cp += d['cp']\n self._cn += d['cn']\n self._total += d['total']\n\n def result(self, metric):\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings('ignore')\n if metric == 'accuracy':\n return (self._tp + self._tn) / self._total\n if metric == 'precision':\n return self._tp / self._pcp\n if metric == 'recall':\n return self._tp / self._cp\n if metric == 'specificity':\n return self._tn / self._cn\n if metric == 'f1':\n precision = self.result('precision')\n recall = self.result('recall')\n return 2 * precision * recall / (precision + recall)\n if metric == 'bacc':\n recall = self.result('recall')\n specificity = self.result('specificity')\n return (recall + specificity) / 2\n raise NotImplementedError()\n\n\nclass ContrastiveBatchScorer(BatchScorer):\n\n def __init__(self, margin, *args, **kwargs):\n self._margin = margin\n self._sess = tf.Session()\n super().__init__(*args, **kwargs)\n\n def score(self, y_true, y_pred, metric):\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n with graph.as_default():\n return sess.run(contrastive_score(tf.convert_to_tensor(\n y_true, tf.float32), tf.convert_to_tensor(y_pred, tf.\n float32), tf.convert_to_tensor(self._margin, tf.float32\n ), metric=metric))\n\n\nclass TripletBatchScorer(ContrastiveBatchScorer):\n\n def score(self, y_true, y_pred, metric):\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n with graph.as_default():\n return sess.run(triplet_score(tf.convert_to_tensor(y_true,\n tf.float32), tf.convert_to_tensor(y_pred, tf.float32),\n tf.convert_to_tensor(self._margin, tf.float32), metric=\n metric))\n\n\nclass FlatPairBatchScorer(ContrastiveBatchScorer):\n\n def score(self, y_true, y_pred, metric):\n assert y_pred.shape[0] == y_true.shape[0] * 2\n a, b = np.split(y_pred, 2)\n dist = np.linalg.norm(a - b, axis=1)\n return super().score(y_true, dist, metric)\n\n\nclass ContrastiveOnKerasMetric:\n\n def __init__(self, margin, metric='accuracy'):\n self.__name__ = 'contrastive_{}'.format(metric)\n self._margin = margin\n self._metric = metric\n\n def __call__(self, labels, embeddings):\n return contrastive_score(labels, embeddings, tf.convert_to_tensor(\n self._margin), 
metric=self._metric)\n\n\nclass TripletOnKerasMetric:\n\n def __init__(self, margin, metric='accuracy'):\n self.__name__ = 'triplet_{}'.format(metric)\n self._margin = margin\n self._metric = metric\n\n def __call__(self, labels, embeddings):\n return triplet_score(labels, embeddings, tf.convert_to_tensor(self.\n _margin), metric=self._metric)\n\n\nclass OfflineMetric:\n\n def __init__(self, *args, **kwargs):\n self.__name__ = self.name()\n\n def name(self):\n raise NotImplementedError()\n\n def handle_batch(self, model, x, labels, pred):\n raise NotImplementedError()\n\n def result(self):\n raise NotImplementedError()\n\n def reset(self):\n pass\n\n\nclass SimilarityValidationMetric(OfflineMetric):\n\n def __init__(self, margin, *args, id='sim', metric=['accuracy'], argmax\n =None, **kwargs):\n self._margin = np.array(margin)\n assert argmax is None or self._margin.ndim == 1 and argmax in metric\n self._metric = metric if isinstance(metric, list) else [metric]\n self._argmax = argmax\n self._scorer = None\n self._id = id\n super().__init__(self, *args, **kwargs)\n\n def name(self):\n metrics = list(map(lambda x: 'val_{}_{}'.format(self._id, x), self.\n _metric))\n if self._argmax is not None:\n metrics.append('val_{}_argmax_{}'.format(self._id, self._argmax))\n return tuple(metrics)\n\n def handle_batch(self, model, x, labels, pred):\n self._scorer.handle(labels, pred)\n\n def result(self):\n if self._argmax is None:\n metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),\n self._metric)\n return tuple(metrics)\n else:\n argmax = safe_nanargmax(self._scorer.result(self._argmax))\n metrics = map(lambda x: self._scorer.result(x)[argmax], self.\n _metric)\n return tuple(metrics) + (self._margin[argmax],)\n\n\nclass TripletValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='triplet', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = TripletBatchScorer(self._margin)\n\n\nclass ContrastiveValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='contrastive', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = ContrastiveBatchScorer(self._margin)\n\n\nclass FlatPairValidationMetric(SimilarityValidationMetric):\n\n def __init__(self, *args, id='fpair', **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = FlatPairBatchScorer(self._margin)\n",
"step-5": "import tensorflow as tf\nimport numpy as np\n\n\ndef safe_nanmax(x):\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings('ignore',\n r'All-NaN (slice|axis) encountered')\n return np.nanmax(x)\n\n\ndef safe_nanargmax(x):\n try:\n return np.nanargmax(x)\n except ValueError:\n return np.nan\n\n\ndef upper_triangular_flat(A):\n ones = tf.ones_like(A)\n mask_a = tf.matrix_band_part(ones, 0, -1)\n mask_b = tf.matrix_band_part(ones, 0, 0)\n mask = tf.cast(mask_a - mask_b, dtype=tf.bool)\n\n return tf.boolean_mask(A, mask)\n\n\ndef pairwise_distances(embeddings, squared=False):\n \"\"\"Compute the 2D matrix of distances between all the embeddings.\n Args:\n embeddings: tensor of shape (batch_size, embed_dim)\n squared: Boolean. If true, output is the pairwise squared euclidean \n distance matrix. \n If false, output is the pairwise euclidean distance matrix.\n Returns:\n pairwise_distances: tensor of shape (batch_size, batch_size)\n \"\"\"\n dot_product = tf.matmul(embeddings, tf.transpose(embeddings))\n square_norm = tf.diag_part(dot_product)\n\n # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2\n # shape (batch_size, batch_size)\n distances = tf.expand_dims(square_norm, 1) - 2.0 * \\\n dot_product + tf.expand_dims(square_norm, 0)\n\n distances = tf.maximum(distances, 0.0)\n\n if not squared:\n mask = tf.to_float(tf.equal(distances, 0.0))\n distances = distances + mask * 1e-16\n distances = tf.sqrt(distances)\n distances = distances * (1.0 - mask)\n\n return distances\n\n\ndef contrastive_score(labels, dist, thresholds, metric=\"accuracy\"):\n d = {}\n if isinstance(metric, list):\n for m in metric:\n d[m] = True\n else:\n d[metric] = True\n res = {}\n\n if \"total\" in d:\n res[\"total\"] = tf.size(labels)\n if \"f1\" in d:\n precision = contrastive_score(\n labels, dist, thresholds, metric=\"precision\")\n recall = contrastive_score(labels, dist, thresholds, metric=\"recall\")\n res[\"f1\"] = 2 * precision * recall / (precision + recall)\n if \"bacc\" in d:\n specificity = contrastive_score(\n labels, dist, thresholds, metric=\"specificity\")\n recall = contrastive_score(labels, dist, thresholds, metric=\"recall\")\n res[\"metric\"] = (specificity + recall) / 2\n\n th = tf.reshape(thresholds, [1, -1])\n dist = tf.reshape(dist, [-1, 1])\n\n labels = tf.cast(tf.reshape(labels, [-1, 1]), tf.int32)\n pred = tf.cast(dist < th, tf.int32)\n\n tp = pred * labels\n tn = (1 - pred) * (1 - labels)\n corr = tp + tn\n\n tp = tf.reduce_sum(tf.cast(tp, tf.float32), axis=0)\n tn = tf.reduce_sum(tf.cast(tn, tf.float32), axis=0)\n pred = tf.cast(pred, tf.float32)\n corr = tf.cast(corr, tf.float32)\n labels = tf.cast(labels, tf.float32)\n\n if \"accuracy\" in d:\n res[\"accuracy\"] = tf.reduce_mean(corr, axis=0)\n if \"precision\" in d:\n res[\"precision\"] = tp / tf.reduce_sum(pred, axis=0)\n if \"recall\" in d:\n res[\"recall\"] = tp / tf.reduce_sum(labels)\n if \"specificity\" in d:\n res[\"specificity\"] = tn / tf.reduce_sum(1 - labels)\n if \"tp\" in d:\n res[\"tp\"] = tp\n if \"tn\" in d:\n res[\"tn\"] = tn\n if \"pcp\" in d:\n res[\"pcp\"] = tf.reduce_sum(pred, axis=0)\n if \"pcn\" in d:\n res[\"pcn\"] = tf.reduce_sum(1 - pred, axis=0)\n if \"cp\" in d:\n res[\"cp\"] = tf.reduce_sum(labels)\n if \"cn\" in d:\n res[\"cn\"] = tf.reduce_sum(1 - labels)\n\n if len(d) != len(res):\n raise NotImplementedError(\"some metrics were not implemented\")\n if not isinstance(metric, list):\n return next(iter(res.values()))\n return res\n\n\ndef triplet_score(labels, embeddings, thresholds, 
metric=\"accuracy\"):\n dist = pairwise_distances(embeddings)\n labels = tf.reshape(labels, [-1, 1])\n pair_labels = tf.cast(tf.equal(labels, tf.transpose(labels)), tf.int32)\n flat_labels = upper_triangular_flat(pair_labels)\n flat_dist = upper_triangular_flat(dist)\n\n return contrastive_score(flat_labels, flat_dist, thresholds, metric=metric)\n\n\nclass BatchScorer:\n def __init__(self):\n self._tp = 0\n self._tn = 0\n self._pcp = 0\n self._pcn = 0\n self._cp = 0\n self._cn = 0\n self._total = 0\n\n def score(self, y_true, y_pred, metric):\n raise NotImplementedError()\n\n def handle(self, y_true, y_pred):\n d = self.score(y_true, y_pred,\n [\"tp\", \"tn\", \"pcp\", \"pcn\", \"cp\", \"cn\", \"total\"])\n self._tp += d[\"tp\"]\n self._tn += d[\"tn\"]\n self._pcp += d[\"pcp\"]\n self._pcn += d[\"pcn\"]\n self._cp += d[\"cp\"]\n self._cn += d[\"cn\"]\n self._total += d[\"total\"]\n\n def result(self, metric):\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings(\"ignore\")\n\n if metric == \"accuracy\":\n return (self._tp + self._tn) / self._total\n if metric == \"precision\":\n return self._tp / self._pcp\n if metric == \"recall\":\n return self._tp / self._cp\n if metric == \"specificity\":\n return self._tn / self._cn\n if metric == \"f1\":\n precision = self.result(\"precision\")\n recall = self.result(\"recall\")\n return 2 * precision * recall / (precision + recall)\n if metric == \"bacc\":\n recall = self.result(\"recall\")\n specificity = self.result(\"specificity\")\n return (recall + specificity) / 2\n\n raise NotImplementedError()\n\n\nclass ContrastiveBatchScorer(BatchScorer):\n def __init__(self, margin, *args, **kwargs):\n self._margin = margin\n self._sess = tf.Session()\n super().__init__(*args, **kwargs)\n\n def score(self, y_true, y_pred, metric):\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n with graph.as_default():\n return sess.run(\n contrastive_score(\n tf.convert_to_tensor(y_true, tf.float32),\n tf.convert_to_tensor(y_pred, tf.float32),\n tf.convert_to_tensor(self._margin, tf.float32),\n metric=metric))\n\n\nclass TripletBatchScorer(ContrastiveBatchScorer):\n def score(self, y_true, y_pred, metric):\n graph = tf.Graph()\n with tf.Session(graph=graph) as sess:\n with graph.as_default():\n return sess.run(\n triplet_score(\n tf.convert_to_tensor(y_true, tf.float32),\n tf.convert_to_tensor(y_pred, tf.float32),\n tf.convert_to_tensor(self._margin, tf.float32),\n metric=metric))\n\n\nclass FlatPairBatchScorer(ContrastiveBatchScorer):\n def score(self, y_true, y_pred, metric):\n assert y_pred.shape[0] == y_true.shape[0] * 2\n a, b = np.split(y_pred, 2)\n dist = np.linalg.norm(a - b, axis=1)\n return super().score(y_true, dist, metric)\n\n\nclass ContrastiveOnKerasMetric:\n def __init__(self, margin, metric=\"accuracy\"):\n self.__name__ = \"contrastive_{}\".format(metric)\n self._margin = margin\n self._metric = metric\n\n def __call__(self, labels, embeddings):\n return contrastive_score(\n labels,\n embeddings,\n tf.convert_to_tensor(self._margin),\n metric=self._metric)\n\n\nclass TripletOnKerasMetric:\n def __init__(self, margin, metric=\"accuracy\"):\n self.__name__ = \"triplet_{}\".format(metric)\n self._margin = margin\n self._metric = metric\n\n def __call__(self, labels, embeddings):\n return triplet_score(\n labels,\n embeddings,\n tf.convert_to_tensor(self._margin),\n metric=self._metric)\n\n\nclass OfflineMetric:\n def __init__(self, *args, **kwargs):\n self.__name__ = self.name()\n\n def name(self):\n raise 
NotImplementedError()\n\n def handle_batch(self, model, x, labels, pred):\n raise NotImplementedError()\n\n def result(self):\n raise NotImplementedError()\n\n def reset(self):\n pass\n\n\nclass SimilarityValidationMetric(OfflineMetric):\n def __init__(self,\n margin,\n *args,\n id=\"sim\",\n metric=[\"accuracy\"],\n argmax=None,\n **kwargs):\n self._margin = np.array(margin)\n assert argmax is None or (self._margin.ndim == 1 and argmax in metric)\n self._metric = metric if isinstance(metric, list) else [metric]\n self._argmax = argmax\n self._scorer = None\n self._id = id\n super().__init__(self, *args, **kwargs)\n\n def name(self):\n metrics = list(\n map(lambda x: \"val_{}_{}\".format(self._id, x), self._metric))\n if self._argmax is not None:\n metrics.append(\"val_{}_argmax_{}\".format(self._id, self._argmax))\n return tuple(metrics)\n\n def handle_batch(self, model, x, labels, pred):\n self._scorer.handle(labels, pred)\n\n def result(self):\n if self._argmax is None:\n metrics = map(lambda x: safe_nanmax(self._scorer.result(x)),\n self._metric)\n return tuple(metrics)\n else:\n argmax = safe_nanargmax(self._scorer.result(self._argmax))\n metrics = map(lambda x: self._scorer.result(x)[argmax],\n self._metric)\n return tuple(metrics) + (self._margin[argmax], )\n\n\nclass TripletValidationMetric(SimilarityValidationMetric):\n def __init__(self, *args, id=\"triplet\", **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = TripletBatchScorer(self._margin)\n\n\nclass ContrastiveValidationMetric(SimilarityValidationMetric):\n def __init__(self, *args, id=\"contrastive\", **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = ContrastiveBatchScorer(self._margin)\n\n\nclass FlatPairValidationMetric(SimilarityValidationMetric):\n def __init__(self, *args, id=\"fpair\", **kwargs):\n super().__init__(*args, id=id, **kwargs)\n\n def reset(self):\n self._scorer = FlatPairBatchScorer(self._margin)\n",
"step-ids": [
18,
20,
32,
39,
46
]
}
|
[
18,
20,
32,
39,
46
] |
import os
import numpy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plotObject(obj):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x,y,z = numpy.nonzero(obj>0)
ax.scatter(x,y,z,c='r',s=10)
xb,yb,zb = numpy.nonzero(obj<0)
ax.scatter(xb,yb,zb,c='b',s=1)
plt.show()
class GridData:
def __init__(self,datafile,labelfile):
f = open(datafile,'rb')
f2 = open(labelfile,'r')
self.samples = []
self.labels = []
self.label_names = []
self.data_size = 30
self.source = datafile
sample_size = self.data_size ** 3
file_size = os.path.getsize(datafile)
self.num_samples = file_size / sample_size
for i in range(self.num_samples):
arr = numpy.fromfile(f,dtype=numpy.int8,count=sample_size)
matrix = arr.reshape((self.data_size,self.data_size,self.data_size))
self.samples.append(matrix.transpose())
l = f2.readline().split()
self.labels.append(int(l[0]))
self.label_names.append(l[1])
def __str__(self):
return "<%s %d samples (%dx%dx%d)>" % (self.source,self.num_samples,self.data_size,self.data_size,self.data_size)
def __repr__(self):
return str(self)
if __name__=="__main__":
partial_view_file = 'partial_view_single.data'
complete_view_file = 'complete_view_single.data'
label_file = 'labels_single.data'
partial_views = GridData(partial_view_file,label_file)
complete_views = GridData(complete_view_file,label_file)
print(partial_views)
print(complete_views)
for i in range(partial_views.num_samples):
plotObject(partial_views.samples[i])
plotObject(complete_views.samples[i])
|
normal
|
{
"blob_id": "8475792cc2d55f030f0bd9e7d0240e3b59ed996b",
"index": 7774,
"step-1": "<mask token>\n\n\nclass GridData:\n\n def __init__(self, datafile, labelfile):\n f = open(datafile, 'rb')\n f2 = open(labelfile, 'r')\n self.samples = []\n self.labels = []\n self.label_names = []\n self.data_size = 30\n self.source = datafile\n sample_size = self.data_size ** 3\n file_size = os.path.getsize(datafile)\n self.num_samples = file_size / sample_size\n for i in range(self.num_samples):\n arr = numpy.fromfile(f, dtype=numpy.int8, count=sample_size)\n matrix = arr.reshape((self.data_size, self.data_size, self.\n data_size))\n self.samples.append(matrix.transpose())\n l = f2.readline().split()\n self.labels.append(int(l[0]))\n self.label_names.append(l[1])\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GridData:\n\n def __init__(self, datafile, labelfile):\n f = open(datafile, 'rb')\n f2 = open(labelfile, 'r')\n self.samples = []\n self.labels = []\n self.label_names = []\n self.data_size = 30\n self.source = datafile\n sample_size = self.data_size ** 3\n file_size = os.path.getsize(datafile)\n self.num_samples = file_size / sample_size\n for i in range(self.num_samples):\n arr = numpy.fromfile(f, dtype=numpy.int8, count=sample_size)\n matrix = arr.reshape((self.data_size, self.data_size, self.\n data_size))\n self.samples.append(matrix.transpose())\n l = f2.readline().split()\n self.labels.append(int(l[0]))\n self.label_names.append(l[1])\n\n def __str__(self):\n return '<%s %d samples (%dx%dx%d)>' % (self.source, self.\n num_samples, self.data_size, self.data_size, self.data_size)\n\n def __repr__(self):\n return str(self)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef plotObject(obj):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n x, y, z = numpy.nonzero(obj > 0)\n ax.scatter(x, y, z, c='r', s=10)\n xb, yb, zb = numpy.nonzero(obj < 0)\n ax.scatter(xb, yb, zb, c='b', s=1)\n plt.show()\n\n\nclass GridData:\n\n def __init__(self, datafile, labelfile):\n f = open(datafile, 'rb')\n f2 = open(labelfile, 'r')\n self.samples = []\n self.labels = []\n self.label_names = []\n self.data_size = 30\n self.source = datafile\n sample_size = self.data_size ** 3\n file_size = os.path.getsize(datafile)\n self.num_samples = file_size / sample_size\n for i in range(self.num_samples):\n arr = numpy.fromfile(f, dtype=numpy.int8, count=sample_size)\n matrix = arr.reshape((self.data_size, self.data_size, self.\n data_size))\n self.samples.append(matrix.transpose())\n l = f2.readline().split()\n self.labels.append(int(l[0]))\n self.label_names.append(l[1])\n\n def __str__(self):\n return '<%s %d samples (%dx%dx%d)>' % (self.source, self.\n num_samples, self.data_size, self.data_size, self.data_size)\n\n def __repr__(self):\n return str(self)\n\n\n<mask token>\n",
"step-4": "import os\nimport numpy\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef plotObject(obj):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n x, y, z = numpy.nonzero(obj > 0)\n ax.scatter(x, y, z, c='r', s=10)\n xb, yb, zb = numpy.nonzero(obj < 0)\n ax.scatter(xb, yb, zb, c='b', s=1)\n plt.show()\n\n\nclass GridData:\n\n def __init__(self, datafile, labelfile):\n f = open(datafile, 'rb')\n f2 = open(labelfile, 'r')\n self.samples = []\n self.labels = []\n self.label_names = []\n self.data_size = 30\n self.source = datafile\n sample_size = self.data_size ** 3\n file_size = os.path.getsize(datafile)\n self.num_samples = file_size / sample_size\n for i in range(self.num_samples):\n arr = numpy.fromfile(f, dtype=numpy.int8, count=sample_size)\n matrix = arr.reshape((self.data_size, self.data_size, self.\n data_size))\n self.samples.append(matrix.transpose())\n l = f2.readline().split()\n self.labels.append(int(l[0]))\n self.label_names.append(l[1])\n\n def __str__(self):\n return '<%s %d samples (%dx%dx%d)>' % (self.source, self.\n num_samples, self.data_size, self.data_size, self.data_size)\n\n def __repr__(self):\n return str(self)\n\n\nif __name__ == '__main__':\n partial_view_file = 'partial_view_single.data'\n complete_view_file = 'complete_view_single.data'\n label_file = 'labels_single.data'\n partial_views = GridData(partial_view_file, label_file)\n complete_views = GridData(complete_view_file, label_file)\n print(partial_views)\n print(complete_views)\n for i in range(partial_views.num_samples):\n plotObject(partial_views.samples[i])\n plotObject(complete_views.samples[i])\n",
"step-5": "import os\nimport numpy\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef plotObject(obj):\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, projection='3d')\n\tx,y,z = numpy.nonzero(obj>0)\n\tax.scatter(x,y,z,c='r',s=10)\n\txb,yb,zb = numpy.nonzero(obj<0)\n\tax.scatter(xb,yb,zb,c='b',s=1)\n\tplt.show()\n\nclass GridData:\n\tdef __init__(self,datafile,labelfile):\n\t\tf = open(datafile,'rb')\n\t\tf2 = open(labelfile,'r')\n\t\tself.samples = []\n\t\tself.labels = []\n\t\tself.label_names = []\n\t\tself.data_size = 30\n\t\tself.source = datafile\n\t\tsample_size = self.data_size ** 3\n\t\tfile_size = os.path.getsize(datafile)\n\t\tself.num_samples = file_size / sample_size\n\t\tfor i in range(self.num_samples):\n\t\t\tarr = numpy.fromfile(f,dtype=numpy.int8,count=sample_size)\n\t\t\tmatrix = arr.reshape((self.data_size,self.data_size,self.data_size))\n\t\t\tself.samples.append(matrix.transpose())\n\t\t\tl = f2.readline().split()\n\t\t\tself.labels.append(int(l[0]))\n\t\t\tself.label_names.append(l[1])\n\t\n\tdef __str__(self):\n\t\treturn \"<%s %d samples (%dx%dx%d)>\" % (self.source,self.num_samples,self.data_size,self.data_size,self.data_size)\n\n\tdef __repr__(self):\n\t\treturn str(self)\n\nif __name__==\"__main__\":\n\tpartial_view_file = 'partial_view_single.data'\n\tcomplete_view_file = 'complete_view_single.data'\n\tlabel_file = 'labels_single.data'\n\n\tpartial_views = GridData(partial_view_file,label_file)\n\tcomplete_views = GridData(complete_view_file,label_file)\n\tprint(partial_views)\n\tprint(complete_views)\n\n\tfor i in range(partial_views.num_samples):\n\t\tplotObject(partial_views.samples[i])\n\t\tplotObject(complete_views.samples[i])\n",
"step-ids": [
2,
4,
5,
7,
8
]
}
|
[
2,
4,
5,
7,
8
] |
"""
db.collection.update()
"""
"""
Example: the whole document gets replaced
> db.test1000.update({'name':'dapeng'},{'name':'大鹏'})
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
> db.test1000.find()
{ "_id" : ObjectId("5c35549d7ad0cf935d3c150d"), "name" : "大鹏" }
{ "_id" : ObjectId("5c3554f37ad0cf935d3c150e"), "nInserted" : 1 }
{ "_id" : ObjectId("5c3555417ad0cf935d3c150f"), "name" : "kongming", "age" : 12 }
{ "_id" : ObjectId("5c3555457ad0cf935d3c1510"), "name" : "kongming1", "age" : 12 }
{ "_id" : ObjectId("5c3555557ad0cf935d3c1511"), "name" : "kongming1", "age" : 12 }
>
"""
"""
Example 2: use $set to update only the specified fields
> db.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}})
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
> db.test1000.find()
{ "_id" : ObjectId("5c35549d7ad0cf935d3c150d"), "name" : "大鹏" }
{ "_id" : ObjectId("5c3554f37ad0cf935d3c150e"), "nInserted" : 1 }
{ "_id" : ObjectId("5c3555417ad0cf935d3c150f"), "name" : "空明被修改", "age" : 12 }
{ "_id" : ObjectId("5c3555457ad0cf935d3c1510"), "name" : "kongming1", "age" : 12 }
{ "_id" : ObjectId("5c3555557ad0cf935d3c1511"), "name" : "kongming1", "age" : 12 }
>
"""
"""
Example 3: update multiple documents
db.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}},{multi:true})
"""
|
normal
|
{
"blob_id": "7d8c2aa5674704d4443034c29bbdc715da9fd567",
"index": 5022,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\ndb.集合.update()\n\n\"\"\"\n\"\"\"\n实例 被替换了\n> db.test1000.update({'name':'dapeng'},{'name':'大鹏'})\nWriteResult({ \"nMatched\" : 1, \"nUpserted\" : 0, \"nModified\" : 1 })\n> db.test1000.find()\n{ \"_id\" : ObjectId(\"5c35549d7ad0cf935d3c150d\"), \"name\" : \"大鹏\" }\n{ \"_id\" : ObjectId(\"5c3554f37ad0cf935d3c150e\"), \"nInserted\" : 1 }\n{ \"_id\" : ObjectId(\"5c3555417ad0cf935d3c150f\"), \"name\" : \"kongming\", \"age\" : 12 }\n{ \"_id\" : ObjectId(\"5c3555457ad0cf935d3c1510\"), \"name\" : \"kongming1\", \"age\" : 12 }\n{ \"_id\" : ObjectId(\"5c3555557ad0cf935d3c1511\"), \"name\" : \"kongming1\", \"age\" : 12 }\n> \n\"\"\"\n\n\"\"\"\n实例2 利用$set:只修改匹配到的值\n> db.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}})\nWriteResult({ \"nMatched\" : 1, \"nUpserted\" : 0, \"nModified\" : 1 })\n> db.test1000.find()\n{ \"_id\" : ObjectId(\"5c35549d7ad0cf935d3c150d\"), \"name\" : \"大鹏\" }\n{ \"_id\" : ObjectId(\"5c3554f37ad0cf935d3c150e\"), \"nInserted\" : 1 }\n{ \"_id\" : ObjectId(\"5c3555417ad0cf935d3c150f\"), \"name\" : \"空明被修改\", \"age\" : 12 }\n{ \"_id\" : ObjectId(\"5c3555457ad0cf935d3c1510\"), \"name\" : \"kongming1\", \"age\" : 12 }\n{ \"_id\" : ObjectId(\"5c3555557ad0cf935d3c1511\"), \"name\" : \"kongming1\", \"age\" : 12 }\n> \n\n\"\"\"\n\"\"\"\n实例3 修改多条\ndb.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}},{multi:true})\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def latinize_word(word):
"""performs bee latin on a word"""
if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':
word = word[1:] + word[0] + 'uzz'
else:
word += 'buzz'
return word.lower()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def latinize_word(word):
"""performs bee latin on a word"""
if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':
word = word[1:] + word[0] + 'uzz'
else:
word += 'buzz'
return word.lower()
<|reserved_special_token_0|>
def main():
"""main function"""
english_sentence = input('Enter English sentence: ')
while english_sentence != 'q':
print(f'Bee latin = {latinize_sentence(english_sentence)}')
english_sentence = input('Enter English sentence: ')
print(latinize_word('goodbye'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def latinize_word(word):
"""performs bee latin on a word"""
if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':
word = word[1:] + word[0] + 'uzz'
else:
word += 'buzz'
return word.lower()
def latinize_sentence(sentence):
"""performs bee latin on a sentence"""
words = sentence.split()
latanized_words = [latinize_word(word) for word in words]
return ' '.join(latanized_words)
def main():
"""main function"""
english_sentence = input('Enter English sentence: ')
while english_sentence != 'q':
print(f'Bee latin = {latinize_sentence(english_sentence)}')
english_sentence = input('Enter English sentence: ')
print(latinize_word('goodbye'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def latinize_word(word):
"""performs bee latin on a word"""
if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':
word = word[1:] + word[0] + 'uzz'
else:
word += 'buzz'
return word.lower()
def latinize_sentence(sentence):
"""performs bee latin on a sentence"""
words = sentence.split()
latanized_words = [latinize_word(word) for word in words]
return ' '.join(latanized_words)
def main():
"""main function"""
english_sentence = input('Enter English sentence: ')
while english_sentence != 'q':
print(f'Bee latin = {latinize_sentence(english_sentence)}')
english_sentence = input('Enter English sentence: ')
print(latinize_word('goodbye'))
main()
<|reserved_special_token_1|>
"""asks the user for english words to latinize"""
def latinize_word(word):
"""performs bee latin on a word"""
if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':
word = word[1:] + word[0] + 'uzz'
else:
word += 'buzz'
return word.lower()
def latinize_sentence(sentence):
"""performs bee latin on a sentence"""
words = sentence.split()
latanized_words = [latinize_word(word) for word in words]
return " ".join(latanized_words)
def main():
"""main function"""
english_sentence = input('Enter English sentence: ')
while english_sentence != 'q':
print(f'Bee latin = {latinize_sentence(english_sentence)}')
english_sentence = input('Enter English sentence: ')
print(latinize_word('goodbye'))
main()
|
flexible
|
{
"blob_id": "5810739300067e8f207d09bf971484a278372a9a",
"index": 5246,
"step-1": "<mask token>\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\n<mask token>\n\n\ndef main():\n \"\"\"main function\"\"\"\n english_sentence = input('Enter English sentence: ')\n while english_sentence != 'q':\n print(f'Bee latin = {latinize_sentence(english_sentence)}')\n english_sentence = input('Enter English sentence: ')\n print(latinize_word('goodbye'))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\ndef latinize_sentence(sentence):\n \"\"\"performs bee latin on a sentence\"\"\"\n words = sentence.split()\n latanized_words = [latinize_word(word) for word in words]\n return ' '.join(latanized_words)\n\n\ndef main():\n \"\"\"main function\"\"\"\n english_sentence = input('Enter English sentence: ')\n while english_sentence != 'q':\n print(f'Bee latin = {latinize_sentence(english_sentence)}')\n english_sentence = input('Enter English sentence: ')\n print(latinize_word('goodbye'))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\ndef latinize_sentence(sentence):\n \"\"\"performs bee latin on a sentence\"\"\"\n words = sentence.split()\n latanized_words = [latinize_word(word) for word in words]\n return ' '.join(latanized_words)\n\n\ndef main():\n \"\"\"main function\"\"\"\n english_sentence = input('Enter English sentence: ')\n while english_sentence != 'q':\n print(f'Bee latin = {latinize_sentence(english_sentence)}')\n english_sentence = input('Enter English sentence: ')\n print(latinize_word('goodbye'))\n\n\nmain()\n",
"step-5": "\"\"\"asks the user for english words to latinize\"\"\"\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\ndef latinize_sentence(sentence):\n \"\"\"performs bee latin on a sentence\"\"\"\n words = sentence.split()\n latanized_words = [latinize_word(word) for word in words]\n return \" \".join(latanized_words)\n\n\ndef main():\n \"\"\"main function\"\"\"\n english_sentence = input('Enter English sentence: ')\n while english_sentence != 'q':\n print(f'Bee latin = {latinize_sentence(english_sentence)}')\n english_sentence = input('Enter English sentence: ')\n print(latinize_word('goodbye'))\n\n\nmain()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import requests
from requests.auth import HTTPBasicAuth
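# Posts a GraphQL query document to the GitHub v4 API with HTTP basic auth and
# returns the decoded JSON response, raising if the status code is not 200.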
def __run_query(self, query):
URL = 'https://api.github.com/graphql'
request = requests.post(URL, json=query,auth=HTTPBasicAuth('gleisonbt', 'Aleister93'))
if request.status_code == 200:
return request.json()
else:
raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
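# Builds a query for the repositories starred by the given user and by the
# first 100 accounts they follow (100 repositories each), then runs it.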
def user_get_starred(self, username):
query = """
query userGetStarred($username: String!){
user(login: $username){
starredRepositories(first:100){
nodes{
nameWithOwner
description
stargazers{
totalCount
}
}
}
following(first:100){
nodes{
starredRepositories(first:100){
nodes{
nameWithOwner
description
stargazers{
totalCount
}
}
}
}
}
}
}
"""
json = {
"query": query, "variables":{
"username": username
}
}
return __run_query(self, json)
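# Runs a repository search for an arbitrary query string and returns the first
# 100 matches with their descriptions and star counts.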
def repos_for_query(self, query):
query2 = """
query queryByItems($queryString: String!){
search(query:$queryString, type:REPOSITORY, first: 100){
nodes{
... on Repository{
nameWithOwner
description
stargazers{
totalCount
}
}
}
}
}
"""
json = {
"query": query2, "variables":{
"queryString": query
}
}
return __run_query(self, json)
|
normal
|
{
"blob_id": "fa511411e59880fd80fba0ccc49c95d42cb4b78d",
"index": 6962,
"step-1": "<mask token>\n\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',\n 'Aleister93'))\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception('Query failed to run by returning code of {}. {}'.\n format(request.status_code, query))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',\n 'Aleister93'))\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception('Query failed to run by returning code of {}. {}'.\n format(request.status_code, query))\n\n\n<mask token>\n\n\ndef repos_for_query(self, query):\n query2 = \"\"\"\n query queryByItems($queryString: String!){\n search(query:$queryString, type:REPOSITORY, first: 100){\n nodes{\n ... on Repository{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query2, 'variables': {'queryString': query}}\n return __run_query(self, json)\n",
"step-3": "<mask token>\n\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',\n 'Aleister93'))\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception('Query failed to run by returning code of {}. {}'.\n format(request.status_code, query))\n\n\ndef user_get_starred(self, username):\n query = \"\"\"\n query userGetStarred($username: String!){\n user(login: $username){\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n following(first:100){\n nodes{\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query, 'variables': {'username': username}}\n return __run_query(self, json)\n\n\ndef repos_for_query(self, query):\n query2 = \"\"\"\n query queryByItems($queryString: String!){\n search(query:$queryString, type:REPOSITORY, first: 100){\n nodes{\n ... on Repository{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query2, 'variables': {'queryString': query}}\n return __run_query(self, json)\n",
"step-4": "import requests\nfrom requests.auth import HTTPBasicAuth\n\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',\n 'Aleister93'))\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception('Query failed to run by returning code of {}. {}'.\n format(request.status_code, query))\n\n\ndef user_get_starred(self, username):\n query = \"\"\"\n query userGetStarred($username: String!){\n user(login: $username){\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n following(first:100){\n nodes{\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query, 'variables': {'username': username}}\n return __run_query(self, json)\n\n\ndef repos_for_query(self, query):\n query2 = \"\"\"\n query queryByItems($queryString: String!){\n search(query:$queryString, type:REPOSITORY, first: 100){\n nodes{\n ... on Repository{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query2, 'variables': {'queryString': query}}\n return __run_query(self, json)\n",
"step-5": "import requests\nfrom requests.auth import HTTPBasicAuth\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n\n request = requests.post(URL, json=query,auth=HTTPBasicAuth('gleisonbt', 'Aleister93'))\n\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception(\"Query failed to run by returning code of {}. {}\".format(request.status_code, query))\n\ndef user_get_starred(self, username):\n query = \"\"\"\n query userGetStarred($username: String!){\n user(login: $username){\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n following(first:100){\n nodes{\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n }\n }\n \"\"\"\n\n json = {\n \"query\": query, \"variables\":{\n \"username\": username\n }\n }\n\n return __run_query(self, json)\n\ndef repos_for_query(self, query):\n query2 = \"\"\"\n query queryByItems($queryString: String!){\n search(query:$queryString, type:REPOSITORY, first: 100){\n nodes{\n ... on Repository{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n \"\"\"\n\n json = {\n \"query\": query2, \"variables\":{\n \"queryString\": query\n }\n }\n\n return __run_query(self, json)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# type: ignore[no-redef]
import pytest
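# Checks that the executor runs plain synchronous callables and returns their
# results, covering no-argument, positional-only, and keyword-argument signatures.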
@pytest.mark.asyncio
@pytest.mark.core
async def test_async_executor(executor):
def func():
pass
result = await executor.run(func)
assert result is None
def func():
return 1
result = await executor.run(func)
assert result == 1
def func(x: int, /, y: float):
return x / y
result = await executor.run(func, 0, 1)
assert result == 0
result = await executor.run(func, 0, y=1)
assert result == 0
def func(x: int, y: float, **kwargs):
return x + y + kwargs.get("test")
result = await executor.run(func, 0, y=1, test=2)
assert result == 3
|
normal
|
{
"blob_id": "67b483d9d002cc66dd368cf53fdc49ebb7b4f4d4",
"index": 9556,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\[email protected]\nasync def test_async_executor(executor):\n\n def func():\n pass\n result = await executor.run(func)\n assert result is None\n\n def func():\n return 1\n result = await executor.run(func)\n assert result == 1\n\n def func(x: int, /, y: float):\n return x / y\n result = await executor.run(func, 0, 1)\n assert result == 0\n result = await executor.run(func, 0, y=1)\n assert result == 0\n\n def func(x: int, y: float, **kwargs):\n return x + y + kwargs.get('test')\n result = await executor.run(func, 0, y=1, test=2)\n assert result == 3\n",
"step-3": "import pytest\n\n\[email protected]\[email protected]\nasync def test_async_executor(executor):\n\n def func():\n pass\n result = await executor.run(func)\n assert result is None\n\n def func():\n return 1\n result = await executor.run(func)\n assert result == 1\n\n def func(x: int, /, y: float):\n return x / y\n result = await executor.run(func, 0, 1)\n assert result == 0\n result = await executor.run(func, 0, y=1)\n assert result == 0\n\n def func(x: int, y: float, **kwargs):\n return x + y + kwargs.get('test')\n result = await executor.run(func, 0, y=1, test=2)\n assert result == 3\n",
"step-4": "# type: ignore[no-redef]\nimport pytest\n\n\[email protected]\[email protected]\nasync def test_async_executor(executor):\n def func():\n pass\n\n result = await executor.run(func)\n assert result is None\n\n def func():\n return 1\n\n result = await executor.run(func)\n assert result == 1\n\n def func(x: int, /, y: float):\n return x / y\n\n result = await executor.run(func, 0, 1)\n assert result == 0\n\n result = await executor.run(func, 0, y=1)\n assert result == 0\n\n def func(x: int, y: float, **kwargs):\n return x + y + kwargs.get(\"test\")\n\n result = await executor.run(func, 0, y=1, test=2)\n assert result == 3\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy
from math import cos, sin, radians, tan
class Window:
    # constructor
def __init__(self, world, xyw_min=None, xyw_max=None):
self.world = world
        # case where it is None
if xyw_min is None or xyw_max is None:
self.xyw_min = (-100, -100)
self.xyw_max = (100, 100)
        # case where it is not None
else:
if not isinstance(xyw_min, tuple) or len(xyw_min) != 2:
raise Exception('O param xyw_min deve ser uma tupla de 2 valores.')
try:
self.xyw_min = (float(xyw_min[0]), float(xyw_min[1]))
except Exception:
raise Exception('As coordenadas xyw_min devem ser pares de números.')
if not isinstance(xyw_max, tuple) or len(xyw_max) != 2:
raise Exception('O param xyw_max deve ser uma tupla de 2 valores.')
try:
self.xyw_max = (float(xyw_max[0]), float(xyw_max[1]))
except Exception:
raise Exception('As coordenadas xyw_max devem ser pares de números.')
self.xyw_1 = self.xyw_min
self.xyw_2 = (self.xyw_max[0], self.xyw_min[1])
self.xyw_3 = (self.xyw_min[0], self.xyw_max[1])
self.xyw_4 = self.xyw_max
        # stores the window's original center (attribute that can be used to bring the view back to its original center)
self.center = self.calcCenter()
        # stores the new center (variable that can be used in future calculations involving the window's center)
self.newCenter = self.center
self.fatorMovimento = 10
self.window_scn = numpy.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        # initialize the window's scn
self.degrees = 0
self.scn()
def set_xyw_min(self, xmin, ymin):
self.xyw_min = (xmin, ymin)
def set_xyw_max(self, xmax, ymax):
self.xyw_max = (xmax, ymax)
    # returns the (x, y) coordinates of the window's center
def calcCenter(self) -> (float, float):
return (self.xyw_min[0] + self.xyw_max[0]) / 2, (self.xyw_min[1] + self.xyw_max[1]) / 2
    # returns the coordinates of the window's lower-left and upper-right corners
def getCoords(self) -> (float, float, float, float):
return self.xyw_min[0], self.xyw_min[1], self.xyw_max[0], self.xyw_max[1]
    # returns the window's width and height
def getWindowDimensions(self) -> (float, float):
xyw1 = numpy.array([self.xyw_1[0], self.xyw_1[1]])
xyw2 = numpy.array([self.xyw_2[0], self.xyw_2[1]])
xyw3 = numpy.array([self.xyw_3[0], self.xyw_3[1]])
return numpy.linalg.norm(xyw2 - xyw1), numpy.linalg.norm(xyw3 - xyw1)
    # translates the window up, from the user's point of view
def moveUp(self):
rad_angle = numpy.radians(self.degrees)
sin = numpy.sin(rad_angle)
cos = numpy.cos(rad_angle)
self._translate(dx=self.fatorMovimento * sin, dy=self.fatorMovimento * cos)
    # translates the window down, from the user's point of view
def moveDown(self):
rad_angle = numpy.radians(self.degrees)
sin = numpy.sin(rad_angle)
cos = numpy.cos(rad_angle)
self._translate(dx=(-1) * self.fatorMovimento * sin, dy=(-1) * self.fatorMovimento * cos)
    # translates the window to the right, from the user's point of view
def moveRight(self):
rad_angle = numpy.radians(180 - self.degrees)
sin = numpy.sin(rad_angle)
cos = numpy.cos(rad_angle)
self._translate(dx=(-1) * self.fatorMovimento * cos, dy=(-1) * self.fatorMovimento * sin)
    # translates the window to the left, from the user's point of view
def moveLeft(self):
rad_angle = numpy.radians(180 - self.degrees)
sin = numpy.sin(rad_angle)
cos = numpy.cos(rad_angle)
self._translate(dx=self.fatorMovimento * cos, dy=self.fatorMovimento * sin)
    # performs the window translation
def _translate(self, dx=0, dy=0):
        # creates the translation matrix of the object for an arbitrary dx and dy
window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1],
[self.xyw_2[0], self.xyw_2[1], 1],
[self.xyw_3[0], self.xyw_3[1], 1],
[self.xyw_4[0], self.xyw_4[1], 1]])
        # performs the translation
translate_matrix = numpy.array([[1, 0, 0], [0, 1, 0], [dx, dy, 1]])
        # updates the window
xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords, translate_matrix)
self.xyw_1 = (xyw_1[0], xyw_1[1])
self.xyw_2 = (xyw_2[0], xyw_2[1])
self.xyw_3 = (xyw_3[0], xyw_3[1])
self.xyw_4 = (xyw_4[0], xyw_4[1])
self.xyw_min = self.xyw_1
self.xyw_max = self.xyw_4
        # updates the center
self.newCenter = self.calcCenter()
        # updates the scn
self.scn()
    # Shrinks the window
def zoomIn(self):
self._scale(scale=0.9)
self.fatorMovimento = self.fatorMovimento * 0.9
    # Enlarges the window
def zoomOut(self):
self._scale(scale=1.1)
self.fatorMovimento = self.fatorMovimento * 1.1
    # Scales the window
def _scale(self, scale=1):
        # center of the object
cx, cy = self.newCenter
        # world coordinates
window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1],
[self.xyw_2[0], self.xyw_2[1], 1],
[self.xyw_3[0], self.xyw_3[1], 1],
[self.xyw_4[0], self.xyw_4[1], 1]])
        # aligns the world center with the object
translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [(-1) * cx, (-1) * cy, 1]])
        # performs the scaling (not sure if that is the right term)
scale_matrix = numpy.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]])
        # reverts the alignment of the world center with the object
translate_matrix_2 = numpy.array([[1, 0, 0], [0, 1, 0], [cx, cy, 1]])
        # builds a matrix that applies all the transformations
transformations = numpy.matmul(translate_matrix_1, scale_matrix)
transformations = numpy.matmul(transformations, translate_matrix_2)
        # applies the transformations
xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords, transformations)
        # updates xyw_min/max
self.xyw_1 = (xyw_1[0], xyw_1[1])
self.xyw_2 = (xyw_2[0], xyw_2[1])
self.xyw_3 = (xyw_3[0], xyw_3[1])
self.xyw_4 = (xyw_4[0], xyw_4[1])
self.xyw_min = self.xyw_1
self.xyw_max = self.xyw_4
        # updates the center
self.newCenter = self.calcCenter()
        # updates the scn
self.scn()
    # Rotates the window clockwise
def rotateRight(self, angle):
# 360 - 10 = 350
self._rotate(360 - angle)
    # Rotates the window counterclockwise
def rotateLeft(self, angle):
self._rotate(angle)
    # Rotates the window around its own center
def _rotate(self, angle=0):
self.degrees = (self.degrees + angle) % 360
        # center of the object
cx, cy = self.newCenter
        # world coordinates
window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1],
[self.xyw_2[0], self.xyw_2[1], 1],
[self.xyw_3[0], self.xyw_3[1], 1],
[self.xyw_4[0], self.xyw_4[1], 1]])
        # aligns the world center with the object
translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [(-1) * cx, (-1) * cy, 1]])
        # performs the rotation
radians = numpy.radians(angle)
sin = numpy.sin(radians)
cos = numpy.cos(radians)
rotate_matrix = numpy.array([[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]])
        # reverts the transformation that was applied
translate_matrix_2 = numpy.array([[1, 0, 0], [0, 1, 0], [cx, cy, 1]])
        # generates the rotation transformation matrix
transformations = numpy.matmul(translate_matrix_1, rotate_matrix)
transformations = numpy.matmul(transformations, translate_matrix_2)
        # applies the transformations
xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords, transformations)
        # updates xyw_min/max
self.xyw_1 = (xyw_1[0], xyw_1[1])
self.xyw_2 = (xyw_2[0], xyw_2[1])
self.xyw_3 = (xyw_3[0], xyw_3[1])
self.xyw_4 = (xyw_4[0], xyw_4[1])
self.xyw_min = self.xyw_1
self.xyw_max = self.xyw_4
        # updates the center
self.newCenter = self.calcCenter()
        # updates the scn
self.scn()
    # Computes the window's coordinate-system transformation matrix
def scn(self):
        # center of the object
cx, cy = self.newCenter
        # aligns the world center with the object
translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [(-1) * cx, (-1) * cy, 1]])
        # takes the INVERSE of the window's current rotation
radians = numpy.radians((-1) * self.degrees)
sin = numpy.sin(radians)
cos = numpy.cos(radians)
        # rotates
rotate_matrix = numpy.array([[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]])
length, height = self.getWindowDimensions()
sx = 1 / (length / 2)
sy = 1 / (height / 2)
        # performs the scaling (not sure if that is the right term)
scale_matrix = numpy.array([[sx, 0, 0], [0, sy, 0], [0, 0, 1]])
        # generates the conversion matrix to the window's scn
scn = numpy.matmul(translate_matrix_1, rotate_matrix)
self.window_scn = numpy.matmul(scn, scale_matrix)
    # Applies the window's coordinate-system transformation matrix to an arbitrary point
def applySCN(self, x, y):
point_coords = numpy.array([x, y, 1])
final_coords = numpy.matmul(point_coords, self.window_scn)
return final_coords[0], final_coords[1]
|
normal
|
{
"blob_id": "deb0cd745eae97a6dbabdfab37e1c6d75e5372f0",
"index": 8422,
"step-1": "<mask token>\n\n\nclass Window:\n\n def __init__(self, world, xyw_min=None, xyw_max=None):\n self.world = world\n if xyw_min is None or xyw_max is None:\n self.xyw_min = -100, -100\n self.xyw_max = 100, 100\n else:\n if not isinstance(xyw_min, tuple) or len(xyw_min) != 2:\n raise Exception(\n 'O param xyw_min deve ser uma tupla de 2 valores.')\n try:\n self.xyw_min = float(xyw_min[0]), float(xyw_min[1])\n except Exception:\n raise Exception(\n 'As coordenadas xyw_min devem ser pares de números.')\n if not isinstance(xyw_max, tuple) or len(xyw_max) != 2:\n raise Exception(\n 'O param xyw_max deve ser uma tupla de 2 valores.')\n try:\n self.xyw_max = float(xyw_max[0]), float(xyw_max[1])\n except Exception:\n raise Exception(\n 'As coordenadas xyw_max devem ser pares de números.')\n self.xyw_1 = self.xyw_min\n self.xyw_2 = self.xyw_max[0], self.xyw_min[1]\n self.xyw_3 = self.xyw_min[0], self.xyw_max[1]\n self.xyw_4 = self.xyw_max\n self.center = self.calcCenter()\n self.newCenter = self.center\n self.fatorMovimento = 10\n self.window_scn = numpy.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n self.degrees = 0\n self.scn()\n\n def set_xyw_min(self, xmin, ymin):\n self.xyw_min = xmin, ymin\n\n def set_xyw_max(self, xmax, ymax):\n self.xyw_max = xmax, ymax\n <mask token>\n\n def getCoords(self) ->(float, float, float, float):\n return self.xyw_min[0], self.xyw_min[1], self.xyw_max[0], self.xyw_max[\n 1]\n <mask token>\n\n def moveUp(self):\n rad_angle = numpy.radians(self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=self.fatorMovimento * sin, dy=self.\n fatorMovimento * cos)\n\n def moveDown(self):\n rad_angle = numpy.radians(self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=-1 * self.fatorMovimento * sin, dy=-1 * self.\n fatorMovimento * cos)\n\n def moveRight(self):\n rad_angle = numpy.radians(180 - self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=-1 * self.fatorMovimento * cos, dy=-1 * self.\n fatorMovimento * sin)\n\n def moveLeft(self):\n rad_angle = numpy.radians(180 - self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=self.fatorMovimento * cos, dy=self.\n fatorMovimento * sin)\n <mask token>\n\n def zoomIn(self):\n self._scale(scale=0.9)\n self.fatorMovimento = self.fatorMovimento * 0.9\n\n def zoomOut(self):\n self._scale(scale=1.1)\n self.fatorMovimento = self.fatorMovimento * 1.1\n\n def _scale(self, scale=1):\n cx, cy = self.newCenter\n window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1], [\n self.xyw_2[0], self.xyw_2[1], 1], [self.xyw_3[0], self.xyw_3[1],\n 1], [self.xyw_4[0], self.xyw_4[1], 1]])\n translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [-1 * cx, -\n 1 * cy, 1]])\n scale_matrix = numpy.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]])\n translate_matrix_2 = numpy.array([[1, 0, 0], [0, 1, 0], [cx, cy, 1]])\n transformations = numpy.matmul(translate_matrix_1, scale_matrix)\n transformations = numpy.matmul(transformations, translate_matrix_2)\n xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords,\n transformations)\n self.xyw_1 = xyw_1[0], xyw_1[1]\n self.xyw_2 = xyw_2[0], xyw_2[1]\n self.xyw_3 = xyw_3[0], xyw_3[1]\n self.xyw_4 = xyw_4[0], xyw_4[1]\n self.xyw_min = self.xyw_1\n self.xyw_max = self.xyw_4\n self.newCenter = self.calcCenter()\n self.scn()\n\n def rotateRight(self, angle):\n self._rotate(360 - angle)\n\n def rotateLeft(self, angle):\n self._rotate(angle)\n <mask 
token>\n <mask token>\n\n def applySCN(self, x, y):\n point_coords = numpy.array([x, y, 1])\n final_coords = numpy.matmul(point_coords, self.window_scn)\n return final_coords[0], final_coords[1]\n",
"step-2": "<mask token>\n\n\nclass Window:\n\n def __init__(self, world, xyw_min=None, xyw_max=None):\n self.world = world\n if xyw_min is None or xyw_max is None:\n self.xyw_min = -100, -100\n self.xyw_max = 100, 100\n else:\n if not isinstance(xyw_min, tuple) or len(xyw_min) != 2:\n raise Exception(\n 'O param xyw_min deve ser uma tupla de 2 valores.')\n try:\n self.xyw_min = float(xyw_min[0]), float(xyw_min[1])\n except Exception:\n raise Exception(\n 'As coordenadas xyw_min devem ser pares de números.')\n if not isinstance(xyw_max, tuple) or len(xyw_max) != 2:\n raise Exception(\n 'O param xyw_max deve ser uma tupla de 2 valores.')\n try:\n self.xyw_max = float(xyw_max[0]), float(xyw_max[1])\n except Exception:\n raise Exception(\n 'As coordenadas xyw_max devem ser pares de números.')\n self.xyw_1 = self.xyw_min\n self.xyw_2 = self.xyw_max[0], self.xyw_min[1]\n self.xyw_3 = self.xyw_min[0], self.xyw_max[1]\n self.xyw_4 = self.xyw_max\n self.center = self.calcCenter()\n self.newCenter = self.center\n self.fatorMovimento = 10\n self.window_scn = numpy.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n self.degrees = 0\n self.scn()\n\n def set_xyw_min(self, xmin, ymin):\n self.xyw_min = xmin, ymin\n\n def set_xyw_max(self, xmax, ymax):\n self.xyw_max = xmax, ymax\n <mask token>\n\n def getCoords(self) ->(float, float, float, float):\n return self.xyw_min[0], self.xyw_min[1], self.xyw_max[0], self.xyw_max[\n 1]\n <mask token>\n\n def moveUp(self):\n rad_angle = numpy.radians(self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=self.fatorMovimento * sin, dy=self.\n fatorMovimento * cos)\n\n def moveDown(self):\n rad_angle = numpy.radians(self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=-1 * self.fatorMovimento * sin, dy=-1 * self.\n fatorMovimento * cos)\n\n def moveRight(self):\n rad_angle = numpy.radians(180 - self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=-1 * self.fatorMovimento * cos, dy=-1 * self.\n fatorMovimento * sin)\n\n def moveLeft(self):\n rad_angle = numpy.radians(180 - self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=self.fatorMovimento * cos, dy=self.\n fatorMovimento * sin)\n\n def _translate(self, dx=0, dy=0):\n window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1], [\n self.xyw_2[0], self.xyw_2[1], 1], [self.xyw_3[0], self.xyw_3[1],\n 1], [self.xyw_4[0], self.xyw_4[1], 1]])\n translate_matrix = numpy.array([[1, 0, 0], [0, 1, 0], [dx, dy, 1]])\n xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords,\n translate_matrix)\n self.xyw_1 = xyw_1[0], xyw_1[1]\n self.xyw_2 = xyw_2[0], xyw_2[1]\n self.xyw_3 = xyw_3[0], xyw_3[1]\n self.xyw_4 = xyw_4[0], xyw_4[1]\n self.xyw_min = self.xyw_1\n self.xyw_max = self.xyw_4\n self.newCenter = self.calcCenter()\n self.scn()\n\n def zoomIn(self):\n self._scale(scale=0.9)\n self.fatorMovimento = self.fatorMovimento * 0.9\n\n def zoomOut(self):\n self._scale(scale=1.1)\n self.fatorMovimento = self.fatorMovimento * 1.1\n\n def _scale(self, scale=1):\n cx, cy = self.newCenter\n window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1], [\n self.xyw_2[0], self.xyw_2[1], 1], [self.xyw_3[0], self.xyw_3[1],\n 1], [self.xyw_4[0], self.xyw_4[1], 1]])\n translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [-1 * cx, -\n 1 * cy, 1]])\n scale_matrix = numpy.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]])\n translate_matrix_2 = numpy.array([[1, 0, 0], [0, 1, 0], [cx, cy, 
1]])\n transformations = numpy.matmul(translate_matrix_1, scale_matrix)\n transformations = numpy.matmul(transformations, translate_matrix_2)\n xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords,\n transformations)\n self.xyw_1 = xyw_1[0], xyw_1[1]\n self.xyw_2 = xyw_2[0], xyw_2[1]\n self.xyw_3 = xyw_3[0], xyw_3[1]\n self.xyw_4 = xyw_4[0], xyw_4[1]\n self.xyw_min = self.xyw_1\n self.xyw_max = self.xyw_4\n self.newCenter = self.calcCenter()\n self.scn()\n\n def rotateRight(self, angle):\n self._rotate(360 - angle)\n\n def rotateLeft(self, angle):\n self._rotate(angle)\n <mask token>\n <mask token>\n\n def applySCN(self, x, y):\n point_coords = numpy.array([x, y, 1])\n final_coords = numpy.matmul(point_coords, self.window_scn)\n return final_coords[0], final_coords[1]\n",
"step-3": "<mask token>\n\n\nclass Window:\n\n def __init__(self, world, xyw_min=None, xyw_max=None):\n self.world = world\n if xyw_min is None or xyw_max is None:\n self.xyw_min = -100, -100\n self.xyw_max = 100, 100\n else:\n if not isinstance(xyw_min, tuple) or len(xyw_min) != 2:\n raise Exception(\n 'O param xyw_min deve ser uma tupla de 2 valores.')\n try:\n self.xyw_min = float(xyw_min[0]), float(xyw_min[1])\n except Exception:\n raise Exception(\n 'As coordenadas xyw_min devem ser pares de números.')\n if not isinstance(xyw_max, tuple) or len(xyw_max) != 2:\n raise Exception(\n 'O param xyw_max deve ser uma tupla de 2 valores.')\n try:\n self.xyw_max = float(xyw_max[0]), float(xyw_max[1])\n except Exception:\n raise Exception(\n 'As coordenadas xyw_max devem ser pares de números.')\n self.xyw_1 = self.xyw_min\n self.xyw_2 = self.xyw_max[0], self.xyw_min[1]\n self.xyw_3 = self.xyw_min[0], self.xyw_max[1]\n self.xyw_4 = self.xyw_max\n self.center = self.calcCenter()\n self.newCenter = self.center\n self.fatorMovimento = 10\n self.window_scn = numpy.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n self.degrees = 0\n self.scn()\n\n def set_xyw_min(self, xmin, ymin):\n self.xyw_min = xmin, ymin\n\n def set_xyw_max(self, xmax, ymax):\n self.xyw_max = xmax, ymax\n\n def calcCenter(self) ->(float, float):\n return (self.xyw_min[0] + self.xyw_max[0]) / 2, (self.xyw_min[1] +\n self.xyw_max[1]) / 2\n\n def getCoords(self) ->(float, float, float, float):\n return self.xyw_min[0], self.xyw_min[1], self.xyw_max[0], self.xyw_max[\n 1]\n\n def getWindowDimensions(self) ->(float, float):\n xyw1 = numpy.array([self.xyw_1[0], self.xyw_1[1]])\n xyw2 = numpy.array([self.xyw_2[0], self.xyw_2[1]])\n xyw3 = numpy.array([self.xyw_3[0], self.xyw_3[1]])\n return numpy.linalg.norm(xyw2 - xyw1), numpy.linalg.norm(xyw3 - xyw1)\n\n def moveUp(self):\n rad_angle = numpy.radians(self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=self.fatorMovimento * sin, dy=self.\n fatorMovimento * cos)\n\n def moveDown(self):\n rad_angle = numpy.radians(self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=-1 * self.fatorMovimento * sin, dy=-1 * self.\n fatorMovimento * cos)\n\n def moveRight(self):\n rad_angle = numpy.radians(180 - self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=-1 * self.fatorMovimento * cos, dy=-1 * self.\n fatorMovimento * sin)\n\n def moveLeft(self):\n rad_angle = numpy.radians(180 - self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=self.fatorMovimento * cos, dy=self.\n fatorMovimento * sin)\n\n def _translate(self, dx=0, dy=0):\n window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1], [\n self.xyw_2[0], self.xyw_2[1], 1], [self.xyw_3[0], self.xyw_3[1],\n 1], [self.xyw_4[0], self.xyw_4[1], 1]])\n translate_matrix = numpy.array([[1, 0, 0], [0, 1, 0], [dx, dy, 1]])\n xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords,\n translate_matrix)\n self.xyw_1 = xyw_1[0], xyw_1[1]\n self.xyw_2 = xyw_2[0], xyw_2[1]\n self.xyw_3 = xyw_3[0], xyw_3[1]\n self.xyw_4 = xyw_4[0], xyw_4[1]\n self.xyw_min = self.xyw_1\n self.xyw_max = self.xyw_4\n self.newCenter = self.calcCenter()\n self.scn()\n\n def zoomIn(self):\n self._scale(scale=0.9)\n self.fatorMovimento = self.fatorMovimento * 0.9\n\n def zoomOut(self):\n self._scale(scale=1.1)\n self.fatorMovimento = self.fatorMovimento * 1.1\n\n def _scale(self, scale=1):\n cx, cy = self.newCenter\n window_coords 
= numpy.array([[self.xyw_1[0], self.xyw_1[1], 1], [\n self.xyw_2[0], self.xyw_2[1], 1], [self.xyw_3[0], self.xyw_3[1],\n 1], [self.xyw_4[0], self.xyw_4[1], 1]])\n translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [-1 * cx, -\n 1 * cy, 1]])\n scale_matrix = numpy.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]])\n translate_matrix_2 = numpy.array([[1, 0, 0], [0, 1, 0], [cx, cy, 1]])\n transformations = numpy.matmul(translate_matrix_1, scale_matrix)\n transformations = numpy.matmul(transformations, translate_matrix_2)\n xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords,\n transformations)\n self.xyw_1 = xyw_1[0], xyw_1[1]\n self.xyw_2 = xyw_2[0], xyw_2[1]\n self.xyw_3 = xyw_3[0], xyw_3[1]\n self.xyw_4 = xyw_4[0], xyw_4[1]\n self.xyw_min = self.xyw_1\n self.xyw_max = self.xyw_4\n self.newCenter = self.calcCenter()\n self.scn()\n\n def rotateRight(self, angle):\n self._rotate(360 - angle)\n\n def rotateLeft(self, angle):\n self._rotate(angle)\n <mask token>\n <mask token>\n\n def applySCN(self, x, y):\n point_coords = numpy.array([x, y, 1])\n final_coords = numpy.matmul(point_coords, self.window_scn)\n return final_coords[0], final_coords[1]\n",
"step-4": "<mask token>\n\n\nclass Window:\n\n def __init__(self, world, xyw_min=None, xyw_max=None):\n self.world = world\n if xyw_min is None or xyw_max is None:\n self.xyw_min = -100, -100\n self.xyw_max = 100, 100\n else:\n if not isinstance(xyw_min, tuple) or len(xyw_min) != 2:\n raise Exception(\n 'O param xyw_min deve ser uma tupla de 2 valores.')\n try:\n self.xyw_min = float(xyw_min[0]), float(xyw_min[1])\n except Exception:\n raise Exception(\n 'As coordenadas xyw_min devem ser pares de números.')\n if not isinstance(xyw_max, tuple) or len(xyw_max) != 2:\n raise Exception(\n 'O param xyw_max deve ser uma tupla de 2 valores.')\n try:\n self.xyw_max = float(xyw_max[0]), float(xyw_max[1])\n except Exception:\n raise Exception(\n 'As coordenadas xyw_max devem ser pares de números.')\n self.xyw_1 = self.xyw_min\n self.xyw_2 = self.xyw_max[0], self.xyw_min[1]\n self.xyw_3 = self.xyw_min[0], self.xyw_max[1]\n self.xyw_4 = self.xyw_max\n self.center = self.calcCenter()\n self.newCenter = self.center\n self.fatorMovimento = 10\n self.window_scn = numpy.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n self.degrees = 0\n self.scn()\n\n def set_xyw_min(self, xmin, ymin):\n self.xyw_min = xmin, ymin\n\n def set_xyw_max(self, xmax, ymax):\n self.xyw_max = xmax, ymax\n\n def calcCenter(self) ->(float, float):\n return (self.xyw_min[0] + self.xyw_max[0]) / 2, (self.xyw_min[1] +\n self.xyw_max[1]) / 2\n\n def getCoords(self) ->(float, float, float, float):\n return self.xyw_min[0], self.xyw_min[1], self.xyw_max[0], self.xyw_max[\n 1]\n\n def getWindowDimensions(self) ->(float, float):\n xyw1 = numpy.array([self.xyw_1[0], self.xyw_1[1]])\n xyw2 = numpy.array([self.xyw_2[0], self.xyw_2[1]])\n xyw3 = numpy.array([self.xyw_3[0], self.xyw_3[1]])\n return numpy.linalg.norm(xyw2 - xyw1), numpy.linalg.norm(xyw3 - xyw1)\n\n def moveUp(self):\n rad_angle = numpy.radians(self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=self.fatorMovimento * sin, dy=self.\n fatorMovimento * cos)\n\n def moveDown(self):\n rad_angle = numpy.radians(self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=-1 * self.fatorMovimento * sin, dy=-1 * self.\n fatorMovimento * cos)\n\n def moveRight(self):\n rad_angle = numpy.radians(180 - self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=-1 * self.fatorMovimento * cos, dy=-1 * self.\n fatorMovimento * sin)\n\n def moveLeft(self):\n rad_angle = numpy.radians(180 - self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=self.fatorMovimento * cos, dy=self.\n fatorMovimento * sin)\n\n def _translate(self, dx=0, dy=0):\n window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1], [\n self.xyw_2[0], self.xyw_2[1], 1], [self.xyw_3[0], self.xyw_3[1],\n 1], [self.xyw_4[0], self.xyw_4[1], 1]])\n translate_matrix = numpy.array([[1, 0, 0], [0, 1, 0], [dx, dy, 1]])\n xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords,\n translate_matrix)\n self.xyw_1 = xyw_1[0], xyw_1[1]\n self.xyw_2 = xyw_2[0], xyw_2[1]\n self.xyw_3 = xyw_3[0], xyw_3[1]\n self.xyw_4 = xyw_4[0], xyw_4[1]\n self.xyw_min = self.xyw_1\n self.xyw_max = self.xyw_4\n self.newCenter = self.calcCenter()\n self.scn()\n\n def zoomIn(self):\n self._scale(scale=0.9)\n self.fatorMovimento = self.fatorMovimento * 0.9\n\n def zoomOut(self):\n self._scale(scale=1.1)\n self.fatorMovimento = self.fatorMovimento * 1.1\n\n def _scale(self, scale=1):\n cx, cy = self.newCenter\n window_coords 
= numpy.array([[self.xyw_1[0], self.xyw_1[1], 1], [\n self.xyw_2[0], self.xyw_2[1], 1], [self.xyw_3[0], self.xyw_3[1],\n 1], [self.xyw_4[0], self.xyw_4[1], 1]])\n translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [-1 * cx, -\n 1 * cy, 1]])\n scale_matrix = numpy.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]])\n translate_matrix_2 = numpy.array([[1, 0, 0], [0, 1, 0], [cx, cy, 1]])\n transformations = numpy.matmul(translate_matrix_1, scale_matrix)\n transformations = numpy.matmul(transformations, translate_matrix_2)\n xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords,\n transformations)\n self.xyw_1 = xyw_1[0], xyw_1[1]\n self.xyw_2 = xyw_2[0], xyw_2[1]\n self.xyw_3 = xyw_3[0], xyw_3[1]\n self.xyw_4 = xyw_4[0], xyw_4[1]\n self.xyw_min = self.xyw_1\n self.xyw_max = self.xyw_4\n self.newCenter = self.calcCenter()\n self.scn()\n\n def rotateRight(self, angle):\n self._rotate(360 - angle)\n\n def rotateLeft(self, angle):\n self._rotate(angle)\n\n def _rotate(self, angle=0):\n self.degrees = (self.degrees + angle) % 360\n cx, cy = self.newCenter\n window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1], [\n self.xyw_2[0], self.xyw_2[1], 1], [self.xyw_3[0], self.xyw_3[1],\n 1], [self.xyw_4[0], self.xyw_4[1], 1]])\n translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [-1 * cx, -\n 1 * cy, 1]])\n radians = numpy.radians(angle)\n sin = numpy.sin(radians)\n cos = numpy.cos(radians)\n rotate_matrix = numpy.array([[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]])\n translate_matrix_2 = numpy.array([[1, 0, 0], [0, 1, 0], [cx, cy, 1]])\n transformations = numpy.matmul(translate_matrix_1, rotate_matrix)\n transformations = numpy.matmul(transformations, translate_matrix_2)\n xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords,\n transformations)\n self.xyw_1 = xyw_1[0], xyw_1[1]\n self.xyw_2 = xyw_2[0], xyw_2[1]\n self.xyw_3 = xyw_3[0], xyw_3[1]\n self.xyw_4 = xyw_4[0], xyw_4[1]\n self.xyw_min = self.xyw_1\n self.xyw_max = self.xyw_4\n self.newCenter = self.calcCenter()\n self.scn()\n\n def scn(self):\n cx, cy = self.newCenter\n translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [-1 * cx, -\n 1 * cy, 1]])\n radians = numpy.radians(-1 * self.degrees)\n sin = numpy.sin(radians)\n cos = numpy.cos(radians)\n rotate_matrix = numpy.array([[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]])\n length, height = self.getWindowDimensions()\n sx = 1 / (length / 2)\n sy = 1 / (height / 2)\n scale_matrix = numpy.array([[sx, 0, 0], [0, sy, 0], [0, 0, 1]])\n scn = numpy.matmul(translate_matrix_1, rotate_matrix)\n self.window_scn = numpy.matmul(scn, scale_matrix)\n\n def applySCN(self, x, y):\n point_coords = numpy.array([x, y, 1])\n final_coords = numpy.matmul(point_coords, self.window_scn)\n return final_coords[0], final_coords[1]\n",
"step-5": "import numpy\nfrom math import cos, sin, radians, tan\n\nclass Window:\n # construtor\n def __init__(self, world, xyw_min=None, xyw_max=None):\n self.world = world\n # caso em q é None\n if xyw_min is None or xyw_max is None:\n self.xyw_min = (-100, -100)\n self.xyw_max = (100, 100)\n # caso em q n é None\n else:\n if not isinstance(xyw_min, tuple) or len(xyw_min) != 2:\n raise Exception('O param xyw_min deve ser uma tupla de 2 valores.')\n try:\n self.xyw_min = (float(xyw_min[0]), float(xyw_min[1]))\n except Exception:\n raise Exception('As coordenadas xyw_min devem ser pares de números.')\n\n if not isinstance(xyw_max, tuple) or len(xyw_max) != 2:\n raise Exception('O param xyw_max deve ser uma tupla de 2 valores.')\n try:\n self.xyw_max = (float(xyw_max[0]), float(xyw_max[1]))\n except Exception:\n raise Exception('As coordenadas xyw_max devem ser pares de números.')\n self.xyw_1 = self.xyw_min\n self.xyw_2 = (self.xyw_max[0], self.xyw_min[1])\n self.xyw_3 = (self.xyw_min[0], self.xyw_max[1])\n self.xyw_4 = self.xyw_max\n # define o centro original da window(attr q pode ser usado para trazer a view de volta ao seu centro original)\n self.center = self.calcCenter()\n # define o novo centro(var que pode ser utilizada em futuros calculos envolvendo o centro da window)\n self.newCenter = self.center\n self.fatorMovimento = 10\n self.window_scn = numpy.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n # inicializa scn da window\n self.degrees = 0\n self.scn()\n\n def set_xyw_min(self, xmin, ymin):\n self.xyw_min = (xmin, ymin)\n\n def set_xyw_max(self, xmax, ymax):\n self.xyw_max = (xmax, ymax)\n\n # retorna as coordenadas (x,y) do centro da window\n def calcCenter(self) -> (float, float):\n return (self.xyw_min[0] + self.xyw_max[0]) / 2, (self.xyw_min[1] + self.xyw_max[1]) / 2\n\n # retorna as coordenadas do canto inferior esquerdo e canto superior direito da window\n def getCoords(self) -> (float, float, float, float):\n return self.xyw_min[0], self.xyw_min[1], self.xyw_max[0], self.xyw_max[1]\n\n # retorna a largura e profundidade da window\n def getWindowDimensions(self) -> (float, float):\n xyw1 = numpy.array([self.xyw_1[0], self.xyw_1[1]])\n xyw2 = numpy.array([self.xyw_2[0], self.xyw_2[1]])\n xyw3 = numpy.array([self.xyw_3[0], self.xyw_3[1]])\n return numpy.linalg.norm(xyw2 - xyw1), numpy.linalg.norm(xyw3 - xyw1)\n\n # translada a window para cima, do ponto de vista do usuario\n def moveUp(self):\n rad_angle = numpy.radians(self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=self.fatorMovimento * sin, dy=self.fatorMovimento * cos)\n\n # translada a window para baixo, do ponto de vista do usuario\n def moveDown(self):\n rad_angle = numpy.radians(self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=(-1) * self.fatorMovimento * sin, dy=(-1) * self.fatorMovimento * cos)\n\n # translada a window para direita, do ponto de vista do usuario\n def moveRight(self):\n rad_angle = numpy.radians(180 - self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=(-1) * self.fatorMovimento * cos, dy=(-1) * self.fatorMovimento * sin)\n\n # translada a window para esquerda, do ponto de vista do usuario\n def moveLeft(self):\n rad_angle = numpy.radians(180 - self.degrees)\n sin = numpy.sin(rad_angle)\n cos = numpy.cos(rad_angle)\n self._translate(dx=self.fatorMovimento * cos, dy=self.fatorMovimento * sin)\n\n # realiza a translaçao da window\n def _translate(self, dx=0, dy=0):\n # cria a 
matriz de translacao do obj para um dx e dy qualquer\n window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1],\n [self.xyw_2[0], self.xyw_2[1], 1],\n [self.xyw_3[0], self.xyw_3[1], 1],\n [self.xyw_4[0], self.xyw_4[1], 1]])\n # realiza a translacao\n translate_matrix = numpy.array([[1, 0, 0], [0, 1, 0], [dx, dy, 1]])\n # atualiza a window\n xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords, translate_matrix)\n self.xyw_1 = (xyw_1[0], xyw_1[1])\n self.xyw_2 = (xyw_2[0], xyw_2[1])\n self.xyw_3 = (xyw_3[0], xyw_3[1])\n self.xyw_4 = (xyw_4[0], xyw_4[1])\n self.xyw_min = self.xyw_1\n self.xyw_max = self.xyw_4\n # atualiza o centro\n self.newCenter = self.calcCenter()\n # atualiza scn\n self.scn()\n\n # Encolhe a window\n def zoomIn(self):\n self._scale(scale=0.9)\n self.fatorMovimento = self.fatorMovimento * 0.9\n\n # Aumenta a window\n def zoomOut(self):\n self._scale(scale=1.1)\n self.fatorMovimento = self.fatorMovimento * 1.1\n\n # Escalona a window\n def _scale(self, scale=1):\n # centro do obj\n cx, cy = self.newCenter\n # coords do mundo\n window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1],\n [self.xyw_2[0], self.xyw_2[1], 1],\n [self.xyw_3[0], self.xyw_3[1], 1],\n [self.xyw_4[0], self.xyw_4[1], 1]])\n # ajusta o centro do mundo com o obj\n translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [(-1) * cx, (-1) * cy, 1]])\n # realiza o escalonamento(num sei se esse e o termo correto)\n scale_matrix = numpy.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]])\n # reverte o ajuste do centro do mundo com o obj\n translate_matrix_2 = numpy.array([[1, 0, 0], [0, 1, 0], [cx, cy, 1]])\n # monta uma matriz que aplica todas as transformacoes\n transformations = numpy.matmul(translate_matrix_1, scale_matrix)\n transformations = numpy.matmul(transformations, translate_matrix_2)\n # aplica as transformacoes\n xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords, transformations)\n # atualiza xyw_min/max\n self.xyw_1 = (xyw_1[0], xyw_1[1])\n self.xyw_2 = (xyw_2[0], xyw_2[1])\n self.xyw_3 = (xyw_3[0], xyw_3[1])\n self.xyw_4 = (xyw_4[0], xyw_4[1])\n self.xyw_min = self.xyw_1\n self.xyw_max = self.xyw_4\n # atualiza o centro\n self.newCenter = self.calcCenter()\n # atualiza scn\n self.scn()\n\n # Rotaciona a window no sentido horario\n def rotateRight(self, angle):\n # 360 - 10 = 350\n self._rotate(360 - angle)\n\n # Rotaciona a window no sentido anti-horario\n def rotateLeft(self, angle):\n self._rotate(angle)\n\n # Rotaciona a window em relaçao ao seu proprio centro\n def _rotate(self, angle=0):\n self.degrees = (self.degrees + angle) % 360\n # centro do obj\n cx, cy = self.newCenter\n # coords do mundo\n window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1],\n [self.xyw_2[0], self.xyw_2[1], 1],\n [self.xyw_3[0], self.xyw_3[1], 1],\n [self.xyw_4[0], self.xyw_4[1], 1]])\n # ajusta o centro do mundo com o obj\n translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [(-1) * cx, (-1) * cy, 1]])\n # realiza a rotacao\n radians = numpy.radians(angle)\n sin = numpy.sin(radians)\n cos = numpy.cos(radians)\n rotate_matrix = numpy.array([[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]])\n # reverte a transformacao feita\n translate_matrix_2 = numpy.array([[1, 0, 0], [0, 1, 0], [cx, cy, 1]])\n # gera a matriz de transformacao de rotacao\n transformations = numpy.matmul(translate_matrix_1, rotate_matrix)\n transformations = numpy.matmul(transformations, translate_matrix_2)\n # aplica as transformacoes\n xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords, transformations)\n # 
atualiza xyw_min/max\n self.xyw_1 = (xyw_1[0], xyw_1[1])\n self.xyw_2 = (xyw_2[0], xyw_2[1])\n self.xyw_3 = (xyw_3[0], xyw_3[1])\n self.xyw_4 = (xyw_4[0], xyw_4[1])\n self.xyw_min = self.xyw_1\n self.xyw_max = self.xyw_4\n # atualiza o centro\n self.newCenter = self.calcCenter()\n # atualiza scn\n self.scn()\n\n # Calcula a matriz de transformaçao de sistemas de coordenadas da window\n def scn(self):\n # centro do obj\n cx, cy = self.newCenter\n # ajusta o centro do mundo com o obj\n translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [(-1) * cx, (-1) * cy, 1]])\n # pega ao INVERSO da rotacao atual da window\n radians = numpy.radians((-1) * self.degrees)\n sin = numpy.sin(radians)\n cos = numpy.cos(radians)\n # rotaciona\n rotate_matrix = numpy.array([[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]])\n length, height = self.getWindowDimensions()\n sx = 1 / (length / 2)\n sy = 1 / (height / 2)\n # realiza o escalonamento(num sei se esse e o termo correto)\n scale_matrix = numpy.array([[sx, 0, 0], [0, sy, 0], [0, 0, 1]])\n # gera a matriz de conversao para scn da window\n scn = numpy.matmul(translate_matrix_1, rotate_matrix)\n self.window_scn = numpy.matmul(scn, scale_matrix)\n\n # Aplica a matriz de transformaçao de sistema de coordenadas da window a um ponto qualquer\n def applySCN(self, x, y):\n point_coords = numpy.array([x, y, 1])\n final_coords = numpy.matmul(point_coords, self.window_scn)\n return final_coords[0], final_coords[1]\n",
"step-ids": [
15,
16,
18,
20,
22
]
}
|
[
15,
16,
18,
20,
22
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
prov_config.enable_ssl(leaf_domain_label=https_cert)
<|reserved_special_token_0|>
aks_service.wait_for_deployment(show_output=True)
print(aks_service.state)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
workspace_name = ''
subscription_id = 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'
resource_group = 'XXXXXXXXXXXXXXXXX'
workspace_region = 'eastus2'
https_cert = 'XXXXX'
aks_name = 'XXXXXXX'
aks_service_name = 'XXXXXXXXX'
ws = Workspace.create(name=workspace_name, subscription_id=subscription_id,
resource_group=resource_group, location=workspace_region, exist_ok=True)
prov_config = AksCompute.provisioning_configuration(vm_size='Standard_D14')
prov_config.enable_ssl(leaf_domain_label=https_cert)
aks_target = ComputeTarget.create(workspace=ws, name=aks_name,
provisioning_configuration=prov_config)
inference_config = InferenceConfig(runtime='python', entry_script=
'aml_app.py', conda_file='myenv.yml', extra_docker_file_steps='dockerfile')
aks_python_bot = AksWebservice.deploy_configuration(autoscale_enabled=False,
num_replicas=3, cpu_cores=2, memory_gb=4, auth_enabled=False)
aks_service = Model.deploy(ws, models=['aml_app.py'], inference_config=
inference_config, deployment_config=aks_python_bot, deployment_target=
aks_target, name=aks_service_name)
aks_service.wait_for_deployment(show_output=True)
print(aks_service.state)
<|reserved_special_token_1|>
from azureml.core.compute import AksCompute
from azureml.core.model import Model, InferenceConfig
from azureml.core.webservice import AksWebservice
workspace_name = ''
subscription_id = 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'
resource_group = 'XXXXXXXXXXXXXXXXX'
workspace_region = 'eastus2'
https_cert = 'XXXXX'
aks_name = 'XXXXXXX'
aks_service_name = 'XXXXXXXXX'
ws = Workspace.create(name=workspace_name, subscription_id=subscription_id,
resource_group=resource_group, location=workspace_region, exist_ok=True)
prov_config = AksCompute.provisioning_configuration(vm_size='Standard_D14')
prov_config.enable_ssl(leaf_domain_label=https_cert)
aks_target = ComputeTarget.create(workspace=ws, name=aks_name,
provisioning_configuration=prov_config)
inference_config = InferenceConfig(runtime='python', entry_script=
'aml_app.py', conda_file='myenv.yml', extra_docker_file_steps='dockerfile')
aks_python_bot = AksWebservice.deploy_configuration(autoscale_enabled=False,
num_replicas=3, cpu_cores=2, memory_gb=4, auth_enabled=False)
aks_service = Model.deploy(ws, models=['aml_app.py'], inference_config=
inference_config, deployment_config=aks_python_bot, deployment_target=
aks_target, name=aks_service_name)
aks_service.wait_for_deployment(show_output=True)
print(aks_service.state)
<|reserved_special_token_1|>
from azureml.core import Workspace
from azureml.core.compute import AksCompute, ComputeTarget
from azureml.core.model import Model, InferenceConfig
from azureml.core.webservice import AksWebservice
workspace_name = ""
subscription_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
resource_group = "XXXXXXXXXXXXXXXXX"
workspace_region = "eastus2"
https_cert = "XXXXX"
aks_name = "XXXXXXX"
aks_service_name = 'XXXXXXXXX'
ws = Workspace.create(name=workspace_name,
subscription_id=subscription_id,
resource_group=resource_group,
location=workspace_region,
exist_ok=True)
# Provision AKS cluster
prov_config = AksCompute.provisioning_configuration(vm_size="Standard_D14")
prov_config.enable_ssl(leaf_domain_label=https_cert)
# Create the cluster
aks_target = ComputeTarget.create(
workspace=ws, name=aks_name, provisioning_configuration=prov_config
)
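# Package the entry script, conda environment, and extra Docker steps into the scoring image.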
inference_config = InferenceConfig(runtime="python",
entry_script="aml_app.py",
conda_file="myenv.yml",
extra_docker_file_steps='dockerfile'
)
aks_python_bot = AksWebservice.deploy_configuration(autoscale_enabled=False,
num_replicas=3,
cpu_cores=2,
memory_gb=4,
auth_enabled=False)
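# Roll the scoring service out to the AKS cluster created above.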
aks_service = Model.deploy(ws,
models=['aml_app.py'],
inference_config=inference_config,
deployment_config=aks_python_bot,
deployment_target=aks_target,
name=aks_service_name)
aks_service.wait_for_deployment(show_output=True)
print(aks_service.state)
|
flexible
|
{
"blob_id": "2941ecde72325d46b5c3899d4b1a213daff67147",
"index": 2613,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprov_config.enable_ssl(leaf_domain_label=https_cert)\n<mask token>\naks_service.wait_for_deployment(show_output=True)\nprint(aks_service.state)\n",
"step-3": "<mask token>\nworkspace_name = ''\nsubscription_id = 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'\nresource_group = 'XXXXXXXXXXXXXXXXX'\nworkspace_region = 'eastus2'\nhttps_cert = 'XXXXX'\naks_name = 'XXXXXXX'\naks_service_name = 'XXXXXXXXX'\nws = Workspace.create(name=workspace_name, subscription_id=subscription_id,\n resource_group=resource_group, location=workspace_region, exist_ok=True)\nprov_config = AksCompute.provisioning_configuration(vm_size='Standard_D14')\nprov_config.enable_ssl(leaf_domain_label=https_cert)\naks_target = ComputeTarget.create(workspace=ws, name=aks_name,\n provisioning_configuration=prov_config)\ninference_config = InferenceConfig(runtime='python', entry_script=\n 'aml_app.py', conda_file='myenv.yml', extra_docker_file_steps='dockerfile')\naks_python_bot = AksWebservice.deploy_configuration(autoscale_enabled=False,\n num_replicas=3, cpu_cores=2, memory_gb=4, auth_enabled=False)\naks_service = Model.deploy(ws, models=['aml_app.py'], inference_config=\n inference_config, deployment_config=aks_python_bot, deployment_target=\n aks_target, name=aks_service_name)\naks_service.wait_for_deployment(show_output=True)\nprint(aks_service.state)\n",
"step-4": "from azureml.core.compute import AksCompute\nfrom azureml.core.model import Model, InferenceConfig\nfrom azureml.core.webservice import AksWebservice\nworkspace_name = ''\nsubscription_id = 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'\nresource_group = 'XXXXXXXXXXXXXXXXX'\nworkspace_region = 'eastus2'\nhttps_cert = 'XXXXX'\naks_name = 'XXXXXXX'\naks_service_name = 'XXXXXXXXX'\nws = Workspace.create(name=workspace_name, subscription_id=subscription_id,\n resource_group=resource_group, location=workspace_region, exist_ok=True)\nprov_config = AksCompute.provisioning_configuration(vm_size='Standard_D14')\nprov_config.enable_ssl(leaf_domain_label=https_cert)\naks_target = ComputeTarget.create(workspace=ws, name=aks_name,\n provisioning_configuration=prov_config)\ninference_config = InferenceConfig(runtime='python', entry_script=\n 'aml_app.py', conda_file='myenv.yml', extra_docker_file_steps='dockerfile')\naks_python_bot = AksWebservice.deploy_configuration(autoscale_enabled=False,\n num_replicas=3, cpu_cores=2, memory_gb=4, auth_enabled=False)\naks_service = Model.deploy(ws, models=['aml_app.py'], inference_config=\n inference_config, deployment_config=aks_python_bot, deployment_target=\n aks_target, name=aks_service_name)\naks_service.wait_for_deployment(show_output=True)\nprint(aks_service.state)\n",
"step-5": "from azureml.core.compute import AksCompute\nfrom azureml.core.model import Model, InferenceConfig\nfrom azureml.core.webservice import AksWebservice\n\nworkspace_name = \"\"\nsubscription_id = \"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\nresource_group = \"XXXXXXXXXXXXXXXXX\"\nworkspace_region = \"eastus2\"\nhttps_cert = \"XXXXX\"\naks_name = \"XXXXXXX\"\naks_service_name = 'XXXXXXXXX'\n\nws = Workspace.create(name=workspace_name,\n subscription_id=subscription_id,\n resource_group=resource_group,\n location=workspace_region,\n exist_ok=True)\n\n# Provision AKS cluster\nprov_config = AksCompute.provisioning_configuration(vm_size=\"Standard_D14\")\nprov_config.enable_ssl(leaf_domain_label=https_cert)\n# Create the cluster\naks_target = ComputeTarget.create(\n workspace=ws, name=aks_name, provisioning_configuration=prov_config\n)\n\ninference_config = InferenceConfig(runtime=\"python\",\n entry_script=\"aml_app.py\",\n conda_file=\"myenv.yml\",\n extra_docker_file_steps='dockerfile'\n )\n\n\naks_python_bot = AksWebservice.deploy_configuration(autoscale_enabled=False,\n num_replicas=3,\n cpu_cores=2,\n memory_gb=4,\n auth_enabled=False)\n\naks_service = Model.deploy(ws,\n models=['aml_app.py'],\n inference_config=inference_config,\n deployment_config=aks_python_bot,\n deployment_target=aks_target,\n name=aks_service_name)\n\naks_service.wait_for_deployment(show_output=True)\nprint(aks_service.state)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import re
print("Welcome to the Python Calculator")
print("To stop calculator type: quit")
previous = 0
run = True
def perform_math():
'''(numbers) -> numbers
accepts numbers from the user and performs continuous
mathematical equations on them.
precondition input must be numbers and mathematical signs
'''
global run
global previous
equation = ""
if previous == 0:
equation = input("Type in an Equation:")
else:
equation = input(str(previous))
#Is it too much to want to figure out a way to "force" numerical input?
if equation == "quit":
run = False
else:
equation = re.sub('[a-zA-Z,:()" "]', '' , equation)
if previous == 0:
previous = eval(equation)
else:
previous = eval(str(previous) + equation)
while run:
perform_math()
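# --- Added sketch, not in the original program ---
# One way to "force" numerical input, as the comment inside perform_math()
# asks: validate the expression against a whitelist before eval() ever sees it.
# The helper below is hypothetical and only illustrates the idea.
ALLOWED_EXPRESSION = re.compile(r'^[\d\s\.\+\-\*/%\(\)]+$')

def is_safe_expression(expr):
    """Return True if expr contains only digits, whitespace and + - * / % ( )."""
    return bool(ALLOWED_EXPRESSION.match(expr))

# Example: is_safe_expression("2 + 3 * 4") -> True
#          is_safe_expression("__import__('os')") -> False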
|
normal
|
{
"blob_id": "4122da21abab462a28c925c1afa5792ec729a75a",
"index": 5087,
"step-1": "<mask token>\n\n\ndef perform_math():\n \"\"\"(numbers) -> numbers\n\n accepts numbers from the user and performs continuous\n mathematical equations on them.\n\n precondition input must be numbers and mathematical signs\n \n \"\"\"\n global run\n global previous\n equation = ''\n if previous == 0:\n equation = input('Type in an Equation:')\n else:\n equation = input(str(previous))\n if equation == 'quit':\n run = False\n else:\n equation = re.sub('[a-zA-Z,:()\" \"]', '', equation)\n if previous == 0:\n previous = eval(equation)\n else:\n previous = eval(str(previous) + equation)\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('Welcome to the Python Calculator')\nprint('To stop calculator type: quit')\n<mask token>\n\n\ndef perform_math():\n \"\"\"(numbers) -> numbers\n\n accepts numbers from the user and performs continuous\n mathematical equations on them.\n\n precondition input must be numbers and mathematical signs\n \n \"\"\"\n global run\n global previous\n equation = ''\n if previous == 0:\n equation = input('Type in an Equation:')\n else:\n equation = input(str(previous))\n if equation == 'quit':\n run = False\n else:\n equation = re.sub('[a-zA-Z,:()\" \"]', '', equation)\n if previous == 0:\n previous = eval(equation)\n else:\n previous = eval(str(previous) + equation)\n\n\nwhile run:\n perform_math()\n",
"step-3": "<mask token>\nprint('Welcome to the Python Calculator')\nprint('To stop calculator type: quit')\nprevious = 0\nrun = True\n\n\ndef perform_math():\n \"\"\"(numbers) -> numbers\n\n accepts numbers from the user and performs continuous\n mathematical equations on them.\n\n precondition input must be numbers and mathematical signs\n \n \"\"\"\n global run\n global previous\n equation = ''\n if previous == 0:\n equation = input('Type in an Equation:')\n else:\n equation = input(str(previous))\n if equation == 'quit':\n run = False\n else:\n equation = re.sub('[a-zA-Z,:()\" \"]', '', equation)\n if previous == 0:\n previous = eval(equation)\n else:\n previous = eval(str(previous) + equation)\n\n\nwhile run:\n perform_math()\n",
"step-4": "import re\nprint('Welcome to the Python Calculator')\nprint('To stop calculator type: quit')\nprevious = 0\nrun = True\n\n\ndef perform_math():\n \"\"\"(numbers) -> numbers\n\n accepts numbers from the user and performs continuous\n mathematical equations on them.\n\n precondition input must be numbers and mathematical signs\n \n \"\"\"\n global run\n global previous\n equation = ''\n if previous == 0:\n equation = input('Type in an Equation:')\n else:\n equation = input(str(previous))\n if equation == 'quit':\n run = False\n else:\n equation = re.sub('[a-zA-Z,:()\" \"]', '', equation)\n if previous == 0:\n previous = eval(equation)\n else:\n previous = eval(str(previous) + equation)\n\n\nwhile run:\n perform_math()\n",
"step-5": "import re\r\n\r\nprint(\"Welcome to the Python Calculator\")\r\nprint(\"To stop calculator type: quit\")\r\n\r\nprevious = 0\r\nrun = True\r\n\r\ndef perform_math():\r\n '''(numbers) -> numbers\r\n\r\n accepts numbers from the user and performs continuous\r\n mathematical equations on them.\r\n\r\n precondition input must be numbers and mathematical signs\r\n \r\n '''\r\n \r\n global run\r\n global previous\r\n equation = \"\"\r\n if previous == 0:\r\n equation = input(\"Type in an Equation:\")\r\n else:\r\n equation = input(str(previous))\r\n \r\n #Is it too much to want to figure out a way to \"force\" numerical input?\r\n \r\n if equation == \"quit\":\r\n run = False\r\n else:\r\n equation = re.sub('[a-zA-Z,:()\" \"]', '' , equation)\r\n if previous == 0:\r\n previous = eval(equation)\r\n else:\r\n previous = eval(str(previous) + equation)\r\n \r\n\r\nwhile run:\r\n perform_math()\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def summarize(text):
sentences_token = sent_tokenize(text)
vectorizer = CountVectorizer(min_df=1, decode_error='replace')
sent_bow = vectorizer.fit_transform(sentences_token)
transformer = TfidfTransformer(norm='l2', smooth_idf=True, use_idf=True)
sent_tfidf = transformer.fit_transform(sent_bow)
similarity_graph = sent_tfidf * sent_tfidf.T
nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
scores = nx.pagerank(nx_graph)
text_rank_graph = sorted(((scores[i], s) for i, s in enumerate(
sentences_token)), reverse=True)
number_of_sents = int(0.4 * len(text_rank_graph))
del text_rank_graph[number_of_sents:]
summary = ' '.join(word for _, word in text_rank_graph)
return summary
<|reserved_special_token_1|>
from nltk.tokenize import sent_tokenize
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import networkx as nx
def summarize(text):
sentences_token = sent_tokenize(text)
vectorizer = CountVectorizer(min_df=1, decode_error='replace')
sent_bow = vectorizer.fit_transform(sentences_token)
transformer = TfidfTransformer(norm='l2', smooth_idf=True, use_idf=True)
sent_tfidf = transformer.fit_transform(sent_bow)
similarity_graph = sent_tfidf * sent_tfidf.T
nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
scores = nx.pagerank(nx_graph)
text_rank_graph = sorted(((scores[i], s) for i, s in enumerate(
sentences_token)), reverse=True)
number_of_sents = int(0.4 * len(text_rank_graph))
del text_rank_graph[number_of_sents:]
summary = ' '.join(word for _, word in text_rank_graph)
return summary
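# --- Added usage sketch (not in the original file) ---
# Assumes the NLTK punkt tokenizer is installed (nltk.download('punkt')) and
# networkx < 3.0, where from_scipy_sparse_matrix() is still available.
if __name__ == '__main__':
    sample = ("TextRank builds a graph whose nodes are sentences. "
              "Edge weights come from tf-idf cosine similarity. "
              "PageRank then scores every sentence in the graph. "
              "The highest scoring sentences form the summary.")
    print(summarize(sample))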
<|reserved_special_token_1|>
from nltk.tokenize import sent_tokenize
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import networkx as nx
def summarize(text):
sentences_token = sent_tokenize(text)
#Feature Extraction
vectorizer = CountVectorizer(min_df=1,decode_error='replace')
sent_bow = vectorizer.fit_transform(sentences_token)
transformer = TfidfTransformer(norm='l2', smooth_idf=True, use_idf=True)
sent_tfidf = transformer.fit_transform(sent_bow)
similarity_graph = sent_tfidf * sent_tfidf.T
nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
scores = nx.pagerank(nx_graph)
text_rank_graph = sorted(((scores[i],s) for i,s in enumerate(sentences_token)), reverse=True)
number_of_sents = int(0.4*len(text_rank_graph))
del text_rank_graph[number_of_sents:]
summary = ' '.join(word for _,word in text_rank_graph)
return summary
|
flexible
|
{
"blob_id": "b75ebcd278ae92274bbbe8d1ce5cb3bb7fa14a2c",
"index": 9637,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef summarize(text):\n sentences_token = sent_tokenize(text)\n vectorizer = CountVectorizer(min_df=1, decode_error='replace')\n sent_bow = vectorizer.fit_transform(sentences_token)\n transformer = TfidfTransformer(norm='l2', smooth_idf=True, use_idf=True)\n sent_tfidf = transformer.fit_transform(sent_bow)\n similarity_graph = sent_tfidf * sent_tfidf.T\n nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)\n scores = nx.pagerank(nx_graph)\n text_rank_graph = sorted(((scores[i], s) for i, s in enumerate(\n sentences_token)), reverse=True)\n number_of_sents = int(0.4 * len(text_rank_graph))\n del text_rank_graph[number_of_sents:]\n summary = ' '.join(word for _, word in text_rank_graph)\n return summary\n",
"step-3": "from nltk.tokenize import sent_tokenize\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nimport networkx as nx\n\n\ndef summarize(text):\n sentences_token = sent_tokenize(text)\n vectorizer = CountVectorizer(min_df=1, decode_error='replace')\n sent_bow = vectorizer.fit_transform(sentences_token)\n transformer = TfidfTransformer(norm='l2', smooth_idf=True, use_idf=True)\n sent_tfidf = transformer.fit_transform(sent_bow)\n similarity_graph = sent_tfidf * sent_tfidf.T\n nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)\n scores = nx.pagerank(nx_graph)\n text_rank_graph = sorted(((scores[i], s) for i, s in enumerate(\n sentences_token)), reverse=True)\n number_of_sents = int(0.4 * len(text_rank_graph))\n del text_rank_graph[number_of_sents:]\n summary = ' '.join(word for _, word in text_rank_graph)\n return summary\n",
"step-4": "from nltk.tokenize import sent_tokenize\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nimport networkx as nx\n\ndef summarize(text):\n \n sentences_token = sent_tokenize(text)\n \n #Feature Extraction\n vectorizer = CountVectorizer(min_df=1,decode_error='replace')\n sent_bow = vectorizer.fit_transform(sentences_token)\n transformer = TfidfTransformer(norm='l2', smooth_idf=True, use_idf=True)\n sent_tfidf = transformer.fit_transform(sent_bow)\n \n similarity_graph = sent_tfidf * sent_tfidf.T\n \n nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)\n scores = nx.pagerank(nx_graph)\n text_rank_graph = sorted(((scores[i],s) for i,s in enumerate(sentences_token)), reverse=True)\n number_of_sents = int(0.4*len(text_rank_graph))\n del text_rank_graph[number_of_sents:]\n summary = ' '.join(word for _,word in text_rank_graph)\n \n return summary\n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class State_Underway(State):
def __init__(self):
super(State_Underway, self).__init__('Underway')
class State_Paused(State):
def __init__(self):
super(State_Paused, self).__init__('Paused')
class State_Completed(State):
def __init__(self):
super(State_Completed, self).__init__('Completed')
class TaskMan(object):
def __init__(self):
self.tasks = []
class Resource(object):
def __init__(self, name):
self.name = name
self.available_count = 0
self.assigned_count = 0
def __lt__(self, other):
return self.available_count < other.available_count
def __cmp__(self, other):
return cmp(self.available_count, other.available_count)
def __str__(self):
return 'Name: %s, weight: %s' % (self.name, self.available_count)
class ResourceGroup(object):
def __init__(self, *resources):
self.resources = set(resources)
def __str__(self):
return ', '.join([x.name for x in self.resources])
class ResourceManager(object):
def __init__(self):
self.resources = set()
def add(self, r):
self.resources.add(r)
def __str__(self):
r = []
for res in self.resources:
r.append(str(res))
return '\n'.join(r)
class Task(object):
s_new = State_New()
s_underway = State_Underway()
s_paused = State_Paused()
s_completed = State_Completed()
def __init__(self, name, duration=4, numworkers=1, resource_group=None):
self.work_units = []
self.name = name
self.predecessors = []
self.successors = []
self.state = self.s_new
self.resource_group = resource_group
self.duration = duration
self.numworkers = numworkers
self.start_offset = 0
self.hard_assigned_resources = []
self.auto_assigned_resources = []
def avail(self, time, resource_group):
"""
Build a set of resources who are available for a given time. It might
make more sense to work based on a given restricted resource set.
"""
a = set()
for r in self.resource_group.resources:
pass
def __str__(self):
r = []
if self.auto_assigned_resources:
r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.
duration), str(self.auto_assigned_resources)))
else:
r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.
duration), str(self.resource_group)))
return '\n'.join(r)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class State_New(State):
<|reserved_special_token_0|>
class State_Underway(State):
def __init__(self):
super(State_Underway, self).__init__('Underway')
class State_Paused(State):
def __init__(self):
super(State_Paused, self).__init__('Paused')
class State_Completed(State):
def __init__(self):
super(State_Completed, self).__init__('Completed')
class TaskMan(object):
def __init__(self):
self.tasks = []
class Resource(object):
def __init__(self, name):
self.name = name
self.available_count = 0
self.assigned_count = 0
def __lt__(self, other):
return self.available_count < other.available_count
def __cmp__(self, other):
return cmp(self.available_count, other.available_count)
def __str__(self):
return 'Name: %s, weight: %s' % (self.name, self.available_count)
class ResourceGroup(object):
def __init__(self, *resources):
self.resources = set(resources)
def __str__(self):
return ', '.join([x.name for x in self.resources])
class ResourceManager(object):
def __init__(self):
self.resources = set()
def add(self, r):
self.resources.add(r)
def __str__(self):
r = []
for res in self.resources:
r.append(str(res))
return '\n'.join(r)
class Task(object):
s_new = State_New()
s_underway = State_Underway()
s_paused = State_Paused()
s_completed = State_Completed()
def __init__(self, name, duration=4, numworkers=1, resource_group=None):
self.work_units = []
self.name = name
self.predecessors = []
self.successors = []
self.state = self.s_new
self.resource_group = resource_group
self.duration = duration
self.numworkers = numworkers
self.start_offset = 0
self.hard_assigned_resources = []
self.auto_assigned_resources = []
def avail(self, time, resource_group):
"""
Build a set of resources who are available for a given time. It might
make more sense to work based on a given restricted resource set.
"""
a = set()
for r in self.resource_group.resources:
pass
def __str__(self):
r = []
if self.auto_assigned_resources:
r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.
duration), str(self.auto_assigned_resources)))
else:
r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.
duration), str(self.resource_group)))
return '\n'.join(r)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class State(object):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class State_New(State):
def __init__(self):
super(State_New, self).__init__('New')
class State_Underway(State):
def __init__(self):
super(State_Underway, self).__init__('Underway')
class State_Paused(State):
def __init__(self):
super(State_Paused, self).__init__('Paused')
class State_Completed(State):
def __init__(self):
super(State_Completed, self).__init__('Completed')
class TaskMan(object):
def __init__(self):
self.tasks = []
class Resource(object):
def __init__(self, name):
self.name = name
self.available_count = 0
self.assigned_count = 0
def __lt__(self, other):
return self.available_count < other.available_count
def __cmp__(self, other):
return cmp(self.available_count, other.available_count)
def __str__(self):
return 'Name: %s, weight: %s' % (self.name, self.available_count)
class ResourceGroup(object):
def __init__(self, *resources):
self.resources = set(resources)
def __str__(self):
return ', '.join([x.name for x in self.resources])
class ResourceManager(object):
def __init__(self):
self.resources = set()
def add(self, r):
self.resources.add(r)
def __str__(self):
r = []
for res in self.resources:
r.append(str(res))
return '\n'.join(r)
class Task(object):
s_new = State_New()
s_underway = State_Underway()
s_paused = State_Paused()
s_completed = State_Completed()
def __init__(self, name, duration=4, numworkers=1, resource_group=None):
self.work_units = []
self.name = name
self.predecessors = []
self.successors = []
self.state = self.s_new
self.resource_group = resource_group
self.duration = duration
self.numworkers = numworkers
self.start_offset = 0
self.hard_assigned_resources = []
self.auto_assigned_resources = []
def avail(self, time, resource_group):
"""
Build a set of resources who are available for a given time. It might
make more sense to work based on a given restricted resource set.
"""
a = set()
for r in self.resource_group.resources:
pass
def __str__(self):
r = []
if self.auto_assigned_resources:
r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.
duration), str(self.auto_assigned_resources)))
else:
r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.
duration), str(self.resource_group)))
return '\n'.join(r)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class State(object):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class State_New(State):
def __init__(self):
super(State_New, self).__init__('New')
class State_Underway(State):
def __init__(self):
super(State_Underway, self).__init__('Underway')
class State_Paused(State):
def __init__(self):
super(State_Paused, self).__init__('Paused')
class State_Completed(State):
def __init__(self):
super(State_Completed, self).__init__('Completed')
class TaskMan(object):
def __init__(self):
self.tasks = []
class Resource(object):
def __init__(self, name):
self.name = name
self.available_count = 0
self.assigned_count = 0
def __lt__(self, other):
return self.available_count < other.available_count
def __cmp__(self, other):
return cmp(self.available_count, other.available_count)
def __str__(self):
return 'Name: %s, weight: %s' % (self.name, self.available_count)
class ResourceGroup(object):
def __init__(self, *resources):
self.resources = set(resources)
def __str__(self):
return ', '.join([x.name for x in self.resources])
class ResourceManager(object):
def __init__(self):
self.resources = set()
def add(self, r):
self.resources.add(r)
def __str__(self):
r = []
for res in self.resources:
r.append(str(res))
return '\n'.join(r)
class Task(object):
s_new = State_New()
s_underway = State_Underway()
s_paused = State_Paused()
s_completed = State_Completed()
def __init__(self, name, duration=4, numworkers=1, resource_group=None):
self.work_units = []
self.name = name
self.predecessors = []
self.successors = []
self.state = self.s_new
self.resource_group = resource_group
self.duration = duration
self.numworkers = numworkers
self.start_offset = 0
self.hard_assigned_resources = []
self.auto_assigned_resources = []
def avail(self, time, resource_group):
"""
Build a set of resources who are available for a given time. It might
make more sense to work based on a given restricted resource set.
"""
a = set()
for r in self.resource_group.resources:
pass
def __str__(self):
r = []
if self.auto_assigned_resources:
r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.
duration), str(self.auto_assigned_resources)))
else:
r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.
duration), str(self.resource_group)))
return '\n'.join(r)
def flatten(tasks):
current_time = 0
running = True
while running:
needs_assignment = False
for t in tasks:
avail_resources = t.avail(current_time, t.resource_group)
if not needs_assignment:
running = False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python3
import datetime, random
class State(object):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class State_New(State):
def __init__(self):
super(State_New, self).__init__("New")
class State_Underway(State):
def __init__(self):
super(State_Underway, self).__init__("Underway")
class State_Paused(State):
def __init__(self):
super(State_Paused, self).__init__("Paused")
class State_Completed(State):
def __init__(self):
super(State_Completed, self).__init__("Completed")
class TaskMan(object):
def __init__(self):
self.tasks = []
class Resource(object):
def __init__(self, name):
self.name = name
# the available_count is how many tasks this resource is available for.
self.available_count = 0
# The assigned count is how many times this resource has been used.
# For sorting, sort based on available+assigned, unless there are
# multiple resources at the same value
self.assigned_count = 0
def __lt__(self, other):
return self.available_count < other.available_count
def __cmp__(self, other):
return cmp(self.available_count, other.available_count)
def __str__(self):
return "Name: %s, weight: %s" % (self.name, self.available_count)
class ResourceGroup(object):
def __init__(self, *resources):
self.resources = set(resources)
def __str__(self):
return ", ".join([x.name for x in self.resources])
#return str(self.resources)
class ResourceManager(object):
def __init__(self):
self.resources = set()
def add(self, r):
self.resources.add(r)
def __str__(self):
r = []
for res in self.resources:
r.append(str(res))
return "\n".join(r)
class Task(object):
s_new = State_New()
s_underway = State_Underway()
s_paused = State_Paused()
s_completed = State_Completed()
def __init__(self, name, duration=4, numworkers=1, resource_group=None):
self.work_units = [] # work units applied so far
self.name = name
self.predecessors = []
self.successors = []
self.state = self.s_new
self.resource_group = resource_group
self.duration = duration
self.numworkers = numworkers
self.start_offset = 0
# hard assigned resources are those designated by the user, and are not
# subject to change by the program.
self.hard_assigned_resources = []
# auto_assigned resources are those designated by the program and may be
# changed at any time until the task has begun. Once the task has begun,
# the resource becomes hard_assigned and must be changed manually if it
# needs to be changed.
self.auto_assigned_resources = []
# A task may be waiting to start, underway, paused or completed.
# Each state change could be accompanied by a comment. If a task is
# paused because it is waiting for either another resource, the
# completion of another task, or some 3rd party action, that should be
# noted in a comment.
# def assign(self):
# self.resource_group.sort()
# for x in range(self.numworkers):
# self.auto_assigned_resources.append(self.resource_group.resources[x])
def avail(self, time, resource_group):
"""
Build a set of resources who are available for a given time. It might
make more sense to work based on a given restricted resource set.
"""
a = set()
for r in self.resource_group.resources:
pass
def __str__(self):
r = []
#r.append("Task: %s" % self.name)
#r.append(" State: %s" % self.state)
#r.append(" Hard Resources: %s" % str(self.hard_assigned_resources))
#r.append(" Auto Resources: %s" % str(self.auto_assigned_resources))
#r.append(" Resource Group: %s" % str(self.resource_group))
if self.auto_assigned_resources:
r.append("%s%s %s" % (self.start_offset*" ", str("-"*self.duration),
str(self.auto_assigned_resources)))
else:
r.append("%s%s %s" % (self.start_offset*" ", str("-"*self.duration),
str(self.resource_group)))
#str(datetime.timedelta(minutes=self.duration*15)))
return "\n".join(r)
def flatten(tasks):
# Because resources may be shared across multiple projects, when flattening
# you need to take that into account.
# I think actually that flattening a set of projects simultaneously would
# probably be a good thing. This would allow us to maximize the efficiency
# of resource allocation.
    # This won't always be possible, some people will have outside commitments
# that cannot be shifted, and this needs to be taken into account when
# assigning them.
current_time = 0
running = True
while running:
needs_assignment = False
for t in tasks:
avail_resources = t.avail(current_time, t.resource_group)
if not needs_assignment:
running = False
if __name__ == '__main__':
rm = ResourceManager()
a = Resource("A")
b = Resource("B")
c = Resource("C")
d = Resource("D")
rm.add(a)
rm.add(b)
rm.add(c)
rm.add(d)
# numtasks = int(random.random()*20)
numtasks = 20
tasks = []
for x in range(numtasks):
fg = [a,b,c,d]
random.shuffle(fg)
#print("Fullgroup: %s" % ", ".join([str(x) for x in fg]))
group = fg[:int(random.random()*3)+1]
duration = int(random.random()*32)+1
#print("Group: %s" % ", ".join([str(x) for x in group]))
t = Task("Prepare Report",duration=duration,
resource_group = ResourceGroup(*group))
tasks.append(t)
for t in tasks:
print(str(t))
# -------------------
# 1. Create a list of resources
# 2. Create a list of tasks
# 3. Create a resource group for each set of tasks
# 4. Auto assign resources to tasks and level the tasks
# So, first, go through all the tasks and weight each resource with how many
# times they appear as available
for t in tasks:
for r in t.resource_group.resources:
r.available_count += 1
# -------------------
# As we lay out tasks, we are at a "current time" point. Once all resources
# are assigned for the current time point, we find the next nearest time
# point when a resource becomes free - at the end of the shortest next task.
# Then we begin looking at assignments again.
#
# So we start at CT=0, and go through each unassigned task.
# When we get to an unassigned task, see if any of the resources assigned to
# it are available at this time.
# If so, take the set of available resources, sort in inverse
# weight order, and choose the first.
#
# After every assignment, add one to the weight of the resource. The idea is
# to bias the resource against being assigned again, until other less
# assigned resources catch up. The only thing I would be afraid of would be
# a resource who is available across many tasks not getting assigned to any
# because his score is too high. Maybe it would be best to keep two tallys -
# the number of available, and the number of assignments, and when sorting
# in preference order, order first by (avail+assigned), and then within a
# given group, order by assigned. This way, if someone has 7 availability
# slots and 0 assigned slots, they will get chosen before someone with 5
# availability slots and 2 assigned slots.
flatten(tasks)
print(str(rm))
# If someone is working on something and they get blocked waiting for
# something (another task or an outside supplier) then the task needs to be
# marked as "blocked/paused" and the assigned tasks shuffled accordingly.
#
# So the idea is that on your smartphone, you can always bring up a "What do
# I do now" display, which is sensitive to task priorities and stalls.
# Another thing I'd really like to try to take into account as much as
# possible is the fact that switching mental contexts between projects is an
# uncomfortable and time consuming process, so we'd want to minimize that
# as much as possible. Probably something like, try to switch projects no
# more often than once every 2 hours (8 work blocks).
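# --- Added sketch (hypothetical, not part of the original file) ---
# One possible reading of the assignment rule described in the comments above,
# e.g. something flatten() could call for each unassigned task: order the
# task's candidate resources by (available_count + assigned_count), break ties
# on assigned_count, take the first numworkers, and bump their assigned_count
# so later picks are biased toward less-used resources.
def assign_greedily(task):
    candidates = sorted(task.resource_group.resources,
                        key=lambda r: (r.available_count + r.assigned_count,
                                       r.assigned_count))
    picked = candidates[:task.numworkers]
    for r in picked:
        r.assigned_count += 1
    task.auto_assigned_resources = picked
    return picked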
|
flexible
|
{
"blob_id": "e40b34f0ee51cc14615c6225a7676929e6d2876a",
"index": 2975,
"step-1": "<mask token>\n\n\nclass State_Underway(State):\n\n def __init__(self):\n super(State_Underway, self).__init__('Underway')\n\n\nclass State_Paused(State):\n\n def __init__(self):\n super(State_Paused, self).__init__('Paused')\n\n\nclass State_Completed(State):\n\n def __init__(self):\n super(State_Completed, self).__init__('Completed')\n\n\nclass TaskMan(object):\n\n def __init__(self):\n self.tasks = []\n\n\nclass Resource(object):\n\n def __init__(self, name):\n self.name = name\n self.available_count = 0\n self.assigned_count = 0\n\n def __lt__(self, other):\n return self.available_count < other.available_count\n\n def __cmp__(self, other):\n return cmp(self.available_count, other.available_count)\n\n def __str__(self):\n return 'Name: %s, weight: %s' % (self.name, self.available_count)\n\n\nclass ResourceGroup(object):\n\n def __init__(self, *resources):\n self.resources = set(resources)\n\n def __str__(self):\n return ', '.join([x.name for x in self.resources])\n\n\nclass ResourceManager(object):\n\n def __init__(self):\n self.resources = set()\n\n def add(self, r):\n self.resources.add(r)\n\n def __str__(self):\n r = []\n for res in self.resources:\n r.append(str(res))\n return '\\n'.join(r)\n\n\nclass Task(object):\n s_new = State_New()\n s_underway = State_Underway()\n s_paused = State_Paused()\n s_completed = State_Completed()\n\n def __init__(self, name, duration=4, numworkers=1, resource_group=None):\n self.work_units = []\n self.name = name\n self.predecessors = []\n self.successors = []\n self.state = self.s_new\n self.resource_group = resource_group\n self.duration = duration\n self.numworkers = numworkers\n self.start_offset = 0\n self.hard_assigned_resources = []\n self.auto_assigned_resources = []\n\n def avail(self, time, resource_group):\n \"\"\"\n Build a set of resources who are available for a given time. It might\n make more sense to work based on a given restricted resource set.\n \"\"\"\n a = set()\n for r in self.resource_group.resources:\n pass\n\n def __str__(self):\n r = []\n if self.auto_assigned_resources:\n r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.\n duration), str(self.auto_assigned_resources)))\n else:\n r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.\n duration), str(self.resource_group)))\n return '\\n'.join(r)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass State_New(State):\n <mask token>\n\n\nclass State_Underway(State):\n\n def __init__(self):\n super(State_Underway, self).__init__('Underway')\n\n\nclass State_Paused(State):\n\n def __init__(self):\n super(State_Paused, self).__init__('Paused')\n\n\nclass State_Completed(State):\n\n def __init__(self):\n super(State_Completed, self).__init__('Completed')\n\n\nclass TaskMan(object):\n\n def __init__(self):\n self.tasks = []\n\n\nclass Resource(object):\n\n def __init__(self, name):\n self.name = name\n self.available_count = 0\n self.assigned_count = 0\n\n def __lt__(self, other):\n return self.available_count < other.available_count\n\n def __cmp__(self, other):\n return cmp(self.available_count, other.available_count)\n\n def __str__(self):\n return 'Name: %s, weight: %s' % (self.name, self.available_count)\n\n\nclass ResourceGroup(object):\n\n def __init__(self, *resources):\n self.resources = set(resources)\n\n def __str__(self):\n return ', '.join([x.name for x in self.resources])\n\n\nclass ResourceManager(object):\n\n def __init__(self):\n self.resources = set()\n\n def add(self, r):\n self.resources.add(r)\n\n def __str__(self):\n r = []\n for res in self.resources:\n r.append(str(res))\n return '\\n'.join(r)\n\n\nclass Task(object):\n s_new = State_New()\n s_underway = State_Underway()\n s_paused = State_Paused()\n s_completed = State_Completed()\n\n def __init__(self, name, duration=4, numworkers=1, resource_group=None):\n self.work_units = []\n self.name = name\n self.predecessors = []\n self.successors = []\n self.state = self.s_new\n self.resource_group = resource_group\n self.duration = duration\n self.numworkers = numworkers\n self.start_offset = 0\n self.hard_assigned_resources = []\n self.auto_assigned_resources = []\n\n def avail(self, time, resource_group):\n \"\"\"\n Build a set of resources who are available for a given time. It might\n make more sense to work based on a given restricted resource set.\n \"\"\"\n a = set()\n for r in self.resource_group.resources:\n pass\n\n def __str__(self):\n r = []\n if self.auto_assigned_resources:\n r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.\n duration), str(self.auto_assigned_resources)))\n else:\n r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.\n duration), str(self.resource_group)))\n return '\\n'.join(r)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass State(object):\n\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return self.name\n\n\nclass State_New(State):\n\n def __init__(self):\n super(State_New, self).__init__('New')\n\n\nclass State_Underway(State):\n\n def __init__(self):\n super(State_Underway, self).__init__('Underway')\n\n\nclass State_Paused(State):\n\n def __init__(self):\n super(State_Paused, self).__init__('Paused')\n\n\nclass State_Completed(State):\n\n def __init__(self):\n super(State_Completed, self).__init__('Completed')\n\n\nclass TaskMan(object):\n\n def __init__(self):\n self.tasks = []\n\n\nclass Resource(object):\n\n def __init__(self, name):\n self.name = name\n self.available_count = 0\n self.assigned_count = 0\n\n def __lt__(self, other):\n return self.available_count < other.available_count\n\n def __cmp__(self, other):\n return cmp(self.available_count, other.available_count)\n\n def __str__(self):\n return 'Name: %s, weight: %s' % (self.name, self.available_count)\n\n\nclass ResourceGroup(object):\n\n def __init__(self, *resources):\n self.resources = set(resources)\n\n def __str__(self):\n return ', '.join([x.name for x in self.resources])\n\n\nclass ResourceManager(object):\n\n def __init__(self):\n self.resources = set()\n\n def add(self, r):\n self.resources.add(r)\n\n def __str__(self):\n r = []\n for res in self.resources:\n r.append(str(res))\n return '\\n'.join(r)\n\n\nclass Task(object):\n s_new = State_New()\n s_underway = State_Underway()\n s_paused = State_Paused()\n s_completed = State_Completed()\n\n def __init__(self, name, duration=4, numworkers=1, resource_group=None):\n self.work_units = []\n self.name = name\n self.predecessors = []\n self.successors = []\n self.state = self.s_new\n self.resource_group = resource_group\n self.duration = duration\n self.numworkers = numworkers\n self.start_offset = 0\n self.hard_assigned_resources = []\n self.auto_assigned_resources = []\n\n def avail(self, time, resource_group):\n \"\"\"\n Build a set of resources who are available for a given time. It might\n make more sense to work based on a given restricted resource set.\n \"\"\"\n a = set()\n for r in self.resource_group.resources:\n pass\n\n def __str__(self):\n r = []\n if self.auto_assigned_resources:\n r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.\n duration), str(self.auto_assigned_resources)))\n else:\n r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.\n duration), str(self.resource_group)))\n return '\\n'.join(r)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass State(object):\n\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return self.name\n\n\nclass State_New(State):\n\n def __init__(self):\n super(State_New, self).__init__('New')\n\n\nclass State_Underway(State):\n\n def __init__(self):\n super(State_Underway, self).__init__('Underway')\n\n\nclass State_Paused(State):\n\n def __init__(self):\n super(State_Paused, self).__init__('Paused')\n\n\nclass State_Completed(State):\n\n def __init__(self):\n super(State_Completed, self).__init__('Completed')\n\n\nclass TaskMan(object):\n\n def __init__(self):\n self.tasks = []\n\n\nclass Resource(object):\n\n def __init__(self, name):\n self.name = name\n self.available_count = 0\n self.assigned_count = 0\n\n def __lt__(self, other):\n return self.available_count < other.available_count\n\n def __cmp__(self, other):\n return cmp(self.available_count, other.available_count)\n\n def __str__(self):\n return 'Name: %s, weight: %s' % (self.name, self.available_count)\n\n\nclass ResourceGroup(object):\n\n def __init__(self, *resources):\n self.resources = set(resources)\n\n def __str__(self):\n return ', '.join([x.name for x in self.resources])\n\n\nclass ResourceManager(object):\n\n def __init__(self):\n self.resources = set()\n\n def add(self, r):\n self.resources.add(r)\n\n def __str__(self):\n r = []\n for res in self.resources:\n r.append(str(res))\n return '\\n'.join(r)\n\n\nclass Task(object):\n s_new = State_New()\n s_underway = State_Underway()\n s_paused = State_Paused()\n s_completed = State_Completed()\n\n def __init__(self, name, duration=4, numworkers=1, resource_group=None):\n self.work_units = []\n self.name = name\n self.predecessors = []\n self.successors = []\n self.state = self.s_new\n self.resource_group = resource_group\n self.duration = duration\n self.numworkers = numworkers\n self.start_offset = 0\n self.hard_assigned_resources = []\n self.auto_assigned_resources = []\n\n def avail(self, time, resource_group):\n \"\"\"\n Build a set of resources who are available for a given time. It might\n make more sense to work based on a given restricted resource set.\n \"\"\"\n a = set()\n for r in self.resource_group.resources:\n pass\n\n def __str__(self):\n r = []\n if self.auto_assigned_resources:\n r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.\n duration), str(self.auto_assigned_resources)))\n else:\n r.append('%s%s %s' % (self.start_offset * ' ', str('-' * self.\n duration), str(self.resource_group)))\n return '\\n'.join(r)\n\n\ndef flatten(tasks):\n current_time = 0\n running = True\n while running:\n needs_assignment = False\n for t in tasks:\n avail_resources = t.avail(current_time, t.resource_group)\n if not needs_assignment:\n running = False\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n\nimport datetime, random\n\nclass State(object):\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return self.name\n\nclass State_New(State):\n def __init__(self):\n super(State_New, self).__init__(\"New\")\n\nclass State_Underway(State):\n def __init__(self):\n super(State_Underway, self).__init__(\"Underway\")\n\nclass State_Paused(State):\n def __init__(self):\n super(State_Paused, self).__init__(\"Paused\")\n\nclass State_Completed(State):\n def __init__(self):\n super(State_Completed, self).__init__(\"Completed\")\n\nclass TaskMan(object):\n def __init__(self):\n self.tasks = []\n\nclass Resource(object):\n def __init__(self, name):\n self.name = name\n\n # the available_count is how many tasks this resource is available for.\n self.available_count = 0\n\n # The assigned count is how many times this resource has been used.\n # For sorting, sort based on available+assigned, unless there are\n # multiple resources at the same value\n self.assigned_count = 0\n\n def __lt__(self, other):\n return self.available_count < other.available_count\n\n def __cmp__(self, other):\n return cmp(self.available_count, other.available_count)\n\n def __str__(self):\n return \"Name: %s, weight: %s\" % (self.name, self.available_count)\n\nclass ResourceGroup(object):\n def __init__(self, *resources):\n self.resources = set(resources)\n\n def __str__(self):\n return \", \".join([x.name for x in self.resources])\n #return str(self.resources)\n\n\nclass ResourceManager(object):\n def __init__(self):\n self.resources = set()\n\n def add(self, r):\n self.resources.add(r)\n\n \n\n def __str__(self):\n r = []\n for res in self.resources:\n r.append(str(res))\n return \"\\n\".join(r)\n\nclass Task(object):\n s_new = State_New()\n s_underway = State_Underway()\n s_paused = State_Paused()\n s_completed = State_Completed()\n\n def __init__(self, name, duration=4, numworkers=1, resource_group=None):\n self.work_units = []\t# work units applied so far\n self.name = name\n self.predecessors = []\n self.successors = []\n self.state = self.s_new\n self.resource_group = resource_group\n self.duration = duration\n self.numworkers = numworkers\n self.start_offset = 0\n\n # hard assigned resources are those designated by the user, and are not\n # subject to change by the program.\n self.hard_assigned_resources = []\n\n # auto_assigned resources are those designated by the program and may be\n # changed at any time until the task has begun. Once the task has begun,\n # the resource becomes hard_assigned and must be changed manually if it\n # needs to be changed.\n self.auto_assigned_resources = []\n\n # A task may be waiting to start, underway, paused or completed.\n # Each state change could be accompanied by a comment. If a task is\n # paused because it is waiting for either another resource, the\n # completion of another task, or some 3rd party action, that should be\n # noted in a comment.\n\n # def assign(self):\n # self.resource_group.sort()\n # for x in range(self.numworkers):\n # self.auto_assigned_resources.append(self.resource_group.resources[x])\n\n def avail(self, time, resource_group):\n \"\"\"\n Build a set of resources who are available for a given time. 
It might\n make more sense to work based on a given restricted resource set.\n \"\"\"\n a = set()\n for r in self.resource_group.resources:\n pass\n \n def __str__(self):\n r = []\n #r.append(\"Task: %s\" % self.name)\n #r.append(\" State: %s\" % self.state)\n #r.append(\" Hard Resources: %s\" % str(self.hard_assigned_resources))\n #r.append(\" Auto Resources: %s\" % str(self.auto_assigned_resources))\n #r.append(\" Resource Group: %s\" % str(self.resource_group))\n\n if self.auto_assigned_resources:\n r.append(\"%s%s %s\" % (self.start_offset*\" \", str(\"-\"*self.duration),\n str(self.auto_assigned_resources)))\n else:\n r.append(\"%s%s %s\" % (self.start_offset*\" \", str(\"-\"*self.duration),\n str(self.resource_group)))\n #str(datetime.timedelta(minutes=self.duration*15)))\n return \"\\n\".join(r)\n\ndef flatten(tasks):\n # Because resources may be shared across multiple projects, when flattening\n # you need to take that into account.\n # I think actually that flattening a set of projects simultaneously would\n # probably be a good thing. This would allow us to maximize the efficiency\n # of resource allocation.\n # This won't always be possible, some people will have outside committments\n # that cannot be shifted, and this needs to be taken into account when\n # assigning them.\n\n current_time = 0\n running = True\n\n while running:\n needs_assignment = False\n for t in tasks:\n avail_resources = t.avail(current_time, t.resource_group)\n\n if not needs_assignment:\n running = False\n\n\nif __name__ == '__main__':\n rm = ResourceManager()\n\n a = Resource(\"A\")\n b = Resource(\"B\")\n c = Resource(\"C\")\n d = Resource(\"D\")\n\n rm.add(a)\n rm.add(b)\n rm.add(c)\n rm.add(d)\n\n# numtasks = int(random.random()*20)\n numtasks = 20\n tasks = []\n\n for x in range(numtasks):\n fg = [a,b,c,d]\n random.shuffle(fg)\n #print(\"Fullgroup: %s\" % \", \".join([str(x) for x in fg]))\n group = fg[:int(random.random()*3)+1]\n duration = int(random.random()*32)+1\n #print(\"Group: %s\" % \", \".join([str(x) for x in group]))\n t = Task(\"Prepare Report\",duration=duration,\n resource_group = ResourceGroup(*group))\n tasks.append(t)\n\n\n for t in tasks:\n print(str(t))\n\n # -------------------\n # 1. Create a list of resources\n # 2. Create a list of tasks\n # 3. Create a resource group for each set of tasks\n # 4. Auto assign resources to tasks and level the tasks\n\n # So, first, go through all the tasks and weight each resource with how many\n # times they appear as available\n\n for t in tasks:\n for r in t.resource_group.resources:\n r.available_count += 1\n\n # -------------------\n # As we lay out tasks, we are at a \"current time\" point. Once all resources\n # are assigned for the current time point, we find the next nearest time\n # point when a resource becomes free - at the end of the shortest next task.\n # Then we begin looking at assignments again.\n #\n # So we start at CT=0, and go through each unassigned task.\n # When we get to an unassigned task, see if any of the resources assigned to\n # it are available at this time.\n # If so, take the set of available resources, sort in inverse\n # weight order, and choose the first.\n #\n # After every assignment, add one to the weight of the resource. The idea is\n # to bias the resource against being assigned again, until other less\n # assigned resources catch up. The only thing I would be afraid of would be\n # a resource who is available across many tasks not getting assigned to any\n # because his score is too high. 
Maybe it would be best to keep two tallys -\n # the number of available, and the number of assignments, and when sorting\n # in preference order, order first by (avail+assigned), and then within a\n # given group, order by assigned. This way, if someone has 7 availability\n # slots and 0 assigned slots, they will get chosen before someone with 5\n # availability slots and 2 assigned slots.\n\n flatten(tasks)\n\n print(str(rm))\n\n # If someone is working on something and they get blocked waiting for\n # something (another task or an outside supplier) then the task needs to be\n # marked as \"blocked/paused\" and the assigned tasks shuffled accordingly.\n # \n # So the idea is that on your smartphone, you can always bring up a \"What do\n # I do now\" display, which is sensitive to task priorities and stalls.\n # Another thing I'd really like to try to take into account as much as\n # possible is the fact that switching mental contexts between projects is an\n # uncomfortable and time consuming process, so we'd want to minimize that\n # as much as possible. Probably something like, try to switch projects no\n # more often than once every 2 hours (8 work blocks).\n\n",
"step-ids": [
25,
26,
30,
31,
34
]
}
|
[
25,
26,
30,
31,
34
] |
class State(object):
    def __init__(self, stateName, stateLevel):
        self.stateName = stateName
        self.stateLevel = stateLevel
|
normal
|
{
"blob_id": "73082ed2824ee65f7f4cbac47b9ebad19cec4196",
"index": 7226,
"step-1": "class State(object):\ndef __init__(self, stateName, stateLevel):\n self.stateName = stateName;\n self.stateLevel = stateLevel;\t\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def read_csv_json(file_name) ->pandas.DataFrame:
if file_name.endswith('json') or file_name.endswith('jsonl'):
df = pandas.read_json(file_name, lines=True)
elif file_name.endswith('csv'):
df = pandas.read_csv(file_name)
else:
raise NotImplementedError
return df
<|reserved_special_token_0|>
class Model:
def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):
with open(config_path, 'r') as f:
self.model_cfg = yaml.safe_load(f)['model']
self.tokenizer = TreebankWordTokenizer()
with open(word2vec_pkl_path, 'rb') as f:
self.vectors = pickle.load(f)
self.model = None
self.session = None
self.graph = None
self.le_encoder = None
self.label_smoothing = label_smoothing
def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,
stratified_split: bool=False, early_stopping: bool=True):
"""
Train a model for a given dataset
Dataset should be a list of tuples consisting of
training sentence and the class label
Args:
tr_set_path: path to training data
save_path: path to save model weights and labels
va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True.
stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split.
early_stopping: whether to do early stopping
Returns:
history of training including average loss for each training epoch
"""
df_tr = read_csv_json(tr_set_path)
if stratified_split:
df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=
va_split, random_state=SEED))
df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]
va_messages, va_labels = list(df_va.text), list(df_va.intent)
va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for
i in range(len(df_va))]
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for
i in range(len(df_tr))]
x_train, y_train, le_encoder = self.__preprocess(tr_dataset)
x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)
else:
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for
i in range(len(df_tr))]
x_train, y_train, le_encoder = self.__preprocess(tr_dataset)
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
session.run(tf.global_variables_initializer())
model = self.__build_model(num_classes=len(le_encoder.
categories_[0]))
model.compile(loss=losses.CategoricalCrossentropy(
label_smoothing=self.label_smoothing), optimizer=self.
model_cfg.get('optimizer', 'adam'))
callback = tf.keras.callbacks.EarlyStopping(monitor=
'val_loss', min_delta=0, patience=5, verbose=0, mode=
'auto', baseline=None, restore_best_weights=True)
print('start training')
history = model.fit(x_train, y_train, batch_size=self.
model_cfg['batch_size'], epochs=100, validation_split=
va_split if not stratified_split else 0,
validation_data=(x_va, y_va) if stratified_split else
None, callbacks=[callback] if early_stopping else None)
history.history['train_data'] = tr_set_path
print(
f"finished training in {len(history.history['loss'])} epochs"
)
save(model, le_encoder, save_path, history)
self.model = model
self.session = session
self.graph = graph
self.le_encoder = le_encoder
return history.history
def __preprocess(self, dataset, le_encoder=None):
"""
Preprocess the dataset, transform the categorical labels into numbers.
Get word embeddings for the training data.
"""
shuffle(dataset)
data = [s['data'] for s in dataset]
labels = [[s['label']] for s in dataset]
if le_encoder is None:
le_encoder = preprocessing.OneHotEncoder(handle_unknown=
'ignore', sparse=False)
le_encoder.fit(labels)
encoded_labels = le_encoder.transform(labels)
print('%s intents with %s samples' % (len(le_encoder.
get_feature_names()), len(data)))
print(le_encoder.categories_[0])
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.
vectors, data, self.model_cfg['embedding_dims'])
x_train = vectorized_data
y_train = encoded_labels
x_train = pad_trunc(x_train, self.model_cfg['maxlen'])
x_train = np.reshape(x_train, (len(x_train), self.model_cfg[
'maxlen'], self.model_cfg['embedding_dims']))
y_train = np.array(y_train)
return x_train, y_train, le_encoder
def __build_model(self, num_classes=2, type='keras'):
print('Build model')
model = Sequential()
layers = self.model_cfg.get('layers', 1)
for l in range(layers):
self.__addLayers(model, self.model_cfg)
model.add(Dense(num_classes))
model.add(Activation('softmax'))
return model
def __addLayers(self, model, model_cfg):
maxlen = model_cfg.get('maxlen', 400)
strides = model_cfg.get('strides', 1)
embedding_dims = model_cfg.get('embedding_dims', 300)
filters = model_cfg.get('filters', 250)
activation_type = model_cfg.get('activation', 'relu')
kernel_size = model_cfg.get('kernel_size', 3)
hidden_dims = model_cfg.get('hidden_dims', 200)
model.add(Conv1D(filters, kernel_size, padding='valid', activation=
activation_type, strides=strides, input_shape=(maxlen,
embedding_dims)))
model.add(GlobalMaxPooling1D())
model.add(Dense(hidden_dims))
model.add(Activation(activation_type))
def load(self, path):
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
self.session = session
self.graph = graph
model, le = load(path)
self.model = model
self.le_encoder = le
def predict(self, input: List[str]):
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.
vectors, input, self.model_cfg['embedding_dims'])
x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])
vectorized_input = np.reshape(x_train, (len(x_train), self.
model_cfg['maxlen'], self.model_cfg['embedding_dims']))
probs, preds = predict(self.session, self.graph, self.model,
vectorized_input, len(self.le_encoder.categories_[0]))
probs = probs.tolist()
results = self.le_encoder.inverse_transform(preds)
output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.
item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.
le_encoder.categories_[0], probs[i]))} for i, r in enumerate(
results)]
return output
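# --- Added usage sketch (illustrative only) ---
# The paths below are placeholders, not files shipped with the original code.
# Training data is a csv/jsonl with 'text' and 'intent' columns, matching
# read_csv_json() above; the YAML config needs a top-level 'model' section.
if __name__ == '__main__':
    clf = Model(word2vec_pkl_path='word2vec_vectors.pkl',  # pickled token -> vector mapping
                config_path='model_config.yml',
                label_smoothing=0.1)
    clf.train(tr_set_path='train.jsonl', save_path='saved_model',
              stratified_split=True, early_stopping=True)
    print(clf.predict(['where is my package?'])[0]['label'])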
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_csv_json(file_name) ->pandas.DataFrame:
if file_name.endswith('json') or file_name.endswith('jsonl'):
df = pandas.read_json(file_name, lines=True)
elif file_name.endswith('csv'):
df = pandas.read_csv(file_name)
else:
raise NotImplementedError
return df
<|reserved_special_token_0|>
def pad_trunc(data, maxlen):
"""
For a given dataset pad with zero vectors or truncate to maxlen
"""
new_data = []
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if len(sample) > maxlen:
temp = sample[:maxlen]
elif len(sample) < maxlen:
temp = list(sample)
additional_elems = maxlen - len(sample)
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data
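# Quick illustration (added): with maxlen=3, a 2-token sample is padded with
# zero vectors of the same embedding width, and longer samples are truncated.
assert pad_trunc([[[1.0, 1.0], [2.0, 2.0]]], 3) == [[[1.0, 1.0], [2.0, 2.0], [0.0, 0.0]]]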
def save(model, le, path, history):
"""
save model based on model, encoder
"""
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
print(f'saving model to {path}')
structure_file = os.path.join(path, 'structure.json')
weight_file = os.path.join(path, 'weight.h5')
labels_file = os.path.join(path, 'classes')
with open(structure_file, 'w') as json_file:
json_file.write(model.to_json())
model.save_weights(weight_file)
np.save(labels_file, le.categories_[0])
with open(os.path.join(path, 'log.json'), 'w') as f:
json.dump(history.history, f)
def load(path):
print(f'loading model from {path}')
structure_file = os.path.join(path, 'structure.json')
weight_file = os.path.join(path, 'weight.h5')
labels_file = os.path.join(path, 'classes.npy')
with open(structure_file, 'r') as json_file:
json_string = json_file.read()
model = model_from_json(json_string)
model.load_weights(weight_file)
model._make_predict_function()
categories = np.load(labels_file)
le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
le.fit([[c] for c in categories])
json_file.close()
return model, le
def predict(session, graph, model, vectorized_input, num_classes):
    if session is None:
        raise RuntimeError('Session is not initialized')
    if graph is None:
        raise RuntimeError('Graph is not initialized')
    if model is None:
        raise RuntimeError('Model is not initialized')
with session.as_default():
with graph.as_default():
probs = model.predict_proba(vectorized_input)
preds = model.predict_classes(vectorized_input)
preds = to_categorical(preds, num_classes=num_classes)
return probs, preds
class Model:
def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):
with open(config_path, 'r') as f:
self.model_cfg = yaml.safe_load(f)['model']
self.tokenizer = TreebankWordTokenizer()
with open(word2vec_pkl_path, 'rb') as f:
self.vectors = pickle.load(f)
self.model = None
self.session = None
self.graph = None
self.le_encoder = None
self.label_smoothing = label_smoothing
def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,
stratified_split: bool=False, early_stopping: bool=True):
"""
Train a model for a given dataset
Dataset should be a list of tuples consisting of
training sentence and the class label
Args:
tr_set_path: path to training data
save_path: path to save model weights and labels
va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True.
stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split.
early_stopping: whether to do early stopping
Returns:
history of training including average loss for each training epoch
"""
df_tr = read_csv_json(tr_set_path)
if stratified_split:
df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=
va_split, random_state=SEED))
df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]
va_messages, va_labels = list(df_va.text), list(df_va.intent)
va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for
i in range(len(df_va))]
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for
i in range(len(df_tr))]
x_train, y_train, le_encoder = self.__preprocess(tr_dataset)
x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)
else:
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for
i in range(len(df_tr))]
x_train, y_train, le_encoder = self.__preprocess(tr_dataset)
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
session.run(tf.global_variables_initializer())
model = self.__build_model(num_classes=len(le_encoder.
categories_[0]))
model.compile(loss=losses.CategoricalCrossentropy(
label_smoothing=self.label_smoothing), optimizer=self.
model_cfg.get('optimizer', 'adam'))
callback = tf.keras.callbacks.EarlyStopping(monitor=
'val_loss', min_delta=0, patience=5, verbose=0, mode=
'auto', baseline=None, restore_best_weights=True)
print('start training')
history = model.fit(x_train, y_train, batch_size=self.
model_cfg['batch_size'], epochs=100, validation_split=
va_split if not stratified_split else 0,
validation_data=(x_va, y_va) if stratified_split else
None, callbacks=[callback] if early_stopping else None)
history.history['train_data'] = tr_set_path
print(
f"finished training in {len(history.history['loss'])} epochs"
)
save(model, le_encoder, save_path, history)
self.model = model
self.session = session
self.graph = graph
self.le_encoder = le_encoder
return history.history
def __preprocess(self, dataset, le_encoder=None):
"""
        Preprocess the dataset and one-hot encode the categorical labels.
Get word embeddings for the training data.
"""
shuffle(dataset)
data = [s['data'] for s in dataset]
labels = [[s['label']] for s in dataset]
if le_encoder is None:
le_encoder = preprocessing.OneHotEncoder(handle_unknown=
'ignore', sparse=False)
le_encoder.fit(labels)
encoded_labels = le_encoder.transform(labels)
print('%s intents with %s samples' % (len(le_encoder.
get_feature_names()), len(data)))
print(le_encoder.categories_[0])
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.
vectors, data, self.model_cfg['embedding_dims'])
x_train = vectorized_data
y_train = encoded_labels
x_train = pad_trunc(x_train, self.model_cfg['maxlen'])
x_train = np.reshape(x_train, (len(x_train), self.model_cfg[
'maxlen'], self.model_cfg['embedding_dims']))
y_train = np.array(y_train)
return x_train, y_train, le_encoder
def __build_model(self, num_classes=2, type='keras'):
print('Build model')
model = Sequential()
layers = self.model_cfg.get('layers', 1)
for l in range(layers):
self.__addLayers(model, self.model_cfg)
model.add(Dense(num_classes))
model.add(Activation('softmax'))
return model
def __addLayers(self, model, model_cfg):
maxlen = model_cfg.get('maxlen', 400)
strides = model_cfg.get('strides', 1)
embedding_dims = model_cfg.get('embedding_dims', 300)
filters = model_cfg.get('filters', 250)
activation_type = model_cfg.get('activation', 'relu')
kernel_size = model_cfg.get('kernel_size', 3)
hidden_dims = model_cfg.get('hidden_dims', 200)
model.add(Conv1D(filters, kernel_size, padding='valid', activation=
activation_type, strides=strides, input_shape=(maxlen,
embedding_dims)))
model.add(GlobalMaxPooling1D())
model.add(Dense(hidden_dims))
model.add(Activation(activation_type))
def load(self, path):
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
self.session = session
self.graph = graph
model, le = load(path)
self.model = model
self.le_encoder = le
def predict(self, input: List[str]):
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.
vectors, input, self.model_cfg['embedding_dims'])
x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])
vectorized_input = np.reshape(x_train, (len(x_train), self.
model_cfg['maxlen'], self.model_cfg['embedding_dims']))
probs, preds = predict(self.session, self.graph, self.model,
vectorized_input, len(self.le_encoder.categories_[0]))
probs = probs.tolist()
results = self.le_encoder.inverse_transform(preds)
output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.
item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.
le_encoder.categories_[0], probs[i]))} for i, r in enumerate(
results)]
return output
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tf.disable_v2_behavior()
<|reserved_special_token_0|>
def read_csv_json(file_name) ->pandas.DataFrame:
if file_name.endswith('json') or file_name.endswith('jsonl'):
df = pandas.read_json(file_name, lines=True)
elif file_name.endswith('csv'):
df = pandas.read_csv(file_name)
else:
raise NotImplementedError
return df
def use_only_alphanumeric(input):
pattern = re.compile('[\\W^\'"]+')
output = pattern.sub(' ', input).strip()
return output
def tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims
):
vectorized_data = []
ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]
token_list = [tokenizer.tokenize(sample) for sample in ds1]
for tokens in token_list:
vecs = []
for token in tokens:
try:
vecs.append(embedding_vector[token].tolist())
except KeyError:
np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(),
16) % 10 ** 6)
unk_vec = np.random.rand(embedding_dims)
vecs.append(unk_vec.tolist())
continue
vectorized_data.append(vecs)
return vectorized_data
def pad_trunc(data, maxlen):
"""
For a given dataset pad with zero vectors or truncate to maxlen
"""
new_data = []
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if len(sample) > maxlen:
temp = sample[:maxlen]
elif len(sample) < maxlen:
temp = list(sample)
additional_elems = maxlen - len(sample)
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data
def save(model, le, path, history):
"""
    Save the model structure, weights, label classes, and training history to path.
"""
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
print(f'saving model to {path}')
structure_file = os.path.join(path, 'structure.json')
weight_file = os.path.join(path, 'weight.h5')
labels_file = os.path.join(path, 'classes')
with open(structure_file, 'w') as json_file:
json_file.write(model.to_json())
model.save_weights(weight_file)
np.save(labels_file, le.categories_[0])
with open(os.path.join(path, 'log.json'), 'w') as f:
json.dump(history.history, f)
def load(path):
print(f'loading model from {path}')
structure_file = os.path.join(path, 'structure.json')
weight_file = os.path.join(path, 'weight.h5')
labels_file = os.path.join(path, 'classes.npy')
with open(structure_file, 'r') as json_file:
json_string = json_file.read()
model = model_from_json(json_string)
model.load_weights(weight_file)
model._make_predict_function()
categories = np.load(labels_file)
le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
le.fit([[c] for c in categories])
json_file.close()
return model, le
def predict(session, graph, model, vectorized_input, num_classes):
    if session is None:
        raise RuntimeError('Session is not initialized')
    if graph is None:
        raise RuntimeError('Graph is not initialized')
    if model is None:
        raise RuntimeError('Model is not initialized')
with session.as_default():
with graph.as_default():
probs = model.predict_proba(vectorized_input)
preds = model.predict_classes(vectorized_input)
preds = to_categorical(preds, num_classes=num_classes)
return probs, preds
class Model:
def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):
with open(config_path, 'r') as f:
self.model_cfg = yaml.safe_load(f)['model']
self.tokenizer = TreebankWordTokenizer()
with open(word2vec_pkl_path, 'rb') as f:
self.vectors = pickle.load(f)
self.model = None
self.session = None
self.graph = None
self.le_encoder = None
self.label_smoothing = label_smoothing
def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,
stratified_split: bool=False, early_stopping: bool=True):
"""
        Train a model on the dataset at tr_set_path.
        The file must be CSV or JSON lines with 'text' and 'intent' columns;
        each row pairs a training sentence with its class label.
Args:
tr_set_path: path to training data
save_path: path to save model weights and labels
va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True.
stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split.
early_stopping: whether to do early stopping
Returns:
history of training including average loss for each training epoch
"""
df_tr = read_csv_json(tr_set_path)
if stratified_split:
df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=
va_split, random_state=SEED))
df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]
va_messages, va_labels = list(df_va.text), list(df_va.intent)
va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for
i in range(len(df_va))]
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for
i in range(len(df_tr))]
x_train, y_train, le_encoder = self.__preprocess(tr_dataset)
x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)
else:
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for
i in range(len(df_tr))]
x_train, y_train, le_encoder = self.__preprocess(tr_dataset)
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
session.run(tf.global_variables_initializer())
model = self.__build_model(num_classes=len(le_encoder.
categories_[0]))
model.compile(loss=losses.CategoricalCrossentropy(
label_smoothing=self.label_smoothing), optimizer=self.
model_cfg.get('optimizer', 'adam'))
callback = tf.keras.callbacks.EarlyStopping(monitor=
'val_loss', min_delta=0, patience=5, verbose=0, mode=
'auto', baseline=None, restore_best_weights=True)
print('start training')
history = model.fit(x_train, y_train, batch_size=self.
model_cfg['batch_size'], epochs=100, validation_split=
va_split if not stratified_split else 0,
validation_data=(x_va, y_va) if stratified_split else
None, callbacks=[callback] if early_stopping else None)
history.history['train_data'] = tr_set_path
print(
f"finished training in {len(history.history['loss'])} epochs"
)
save(model, le_encoder, save_path, history)
self.model = model
self.session = session
self.graph = graph
self.le_encoder = le_encoder
return history.history
def __preprocess(self, dataset, le_encoder=None):
"""
        Preprocess the dataset and one-hot encode the categorical labels.
Get word embeddings for the training data.
"""
shuffle(dataset)
data = [s['data'] for s in dataset]
labels = [[s['label']] for s in dataset]
if le_encoder is None:
le_encoder = preprocessing.OneHotEncoder(handle_unknown=
'ignore', sparse=False)
le_encoder.fit(labels)
encoded_labels = le_encoder.transform(labels)
print('%s intents with %s samples' % (len(le_encoder.
get_feature_names()), len(data)))
print(le_encoder.categories_[0])
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.
vectors, data, self.model_cfg['embedding_dims'])
x_train = vectorized_data
y_train = encoded_labels
x_train = pad_trunc(x_train, self.model_cfg['maxlen'])
x_train = np.reshape(x_train, (len(x_train), self.model_cfg[
'maxlen'], self.model_cfg['embedding_dims']))
y_train = np.array(y_train)
return x_train, y_train, le_encoder
def __build_model(self, num_classes=2, type='keras'):
print('Build model')
model = Sequential()
layers = self.model_cfg.get('layers', 1)
for l in range(layers):
self.__addLayers(model, self.model_cfg)
model.add(Dense(num_classes))
model.add(Activation('softmax'))
return model
def __addLayers(self, model, model_cfg):
maxlen = model_cfg.get('maxlen', 400)
strides = model_cfg.get('strides', 1)
embedding_dims = model_cfg.get('embedding_dims', 300)
filters = model_cfg.get('filters', 250)
activation_type = model_cfg.get('activation', 'relu')
kernel_size = model_cfg.get('kernel_size', 3)
hidden_dims = model_cfg.get('hidden_dims', 200)
model.add(Conv1D(filters, kernel_size, padding='valid', activation=
activation_type, strides=strides, input_shape=(maxlen,
embedding_dims)))
model.add(GlobalMaxPooling1D())
model.add(Dense(hidden_dims))
model.add(Activation(activation_type))
def load(self, path):
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
self.session = session
self.graph = graph
model, le = load(path)
self.model = model
self.le_encoder = le
def predict(self, input: List[str]):
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.
vectors, input, self.model_cfg['embedding_dims'])
x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])
vectorized_input = np.reshape(x_train, (len(x_train), self.
model_cfg['maxlen'], self.model_cfg['embedding_dims']))
probs, preds = predict(self.session, self.graph, self.model,
vectorized_input, len(self.le_encoder.categories_[0]))
probs = probs.tolist()
results = self.le_encoder.inverse_transform(preds)
output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.
item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.
le_encoder.categories_[0], probs[i]))} for i, r in enumerate(
results)]
return output
<|reserved_special_token_1|>
from sklearn import preprocessing
from random import shuffle
import numpy as np
import collections
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, GlobalMaxPooling1D
from tensorflow.keras.models import Sequential, model_from_json
from tensorflow.keras import backend as K
from gensim.models.keyedvectors import KeyedVectors
from nltk.tokenize import TreebankWordTokenizer
import re
import pickle
import os
import yaml
import pandas
from typing import List
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import losses, optimizers
from early_stopping import EarlyStoppingAtMaxMacroF1
import json
import hashlib
SEED = 7
def read_csv_json(file_name) ->pandas.DataFrame:
if file_name.endswith('json') or file_name.endswith('jsonl'):
df = pandas.read_json(file_name, lines=True)
elif file_name.endswith('csv'):
df = pandas.read_csv(file_name)
else:
raise NotImplementedError
return df
def use_only_alphanumeric(input):
pattern = re.compile('[\\W^\'"]+')
output = pattern.sub(' ', input).strip()
return output
def tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims
):
vectorized_data = []
ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]
token_list = [tokenizer.tokenize(sample) for sample in ds1]
for tokens in token_list:
vecs = []
for token in tokens:
try:
vecs.append(embedding_vector[token].tolist())
except KeyError:
np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(),
16) % 10 ** 6)
unk_vec = np.random.rand(embedding_dims)
vecs.append(unk_vec.tolist())
continue
vectorized_data.append(vecs)
return vectorized_data
def pad_trunc(data, maxlen):
"""
For a given dataset pad with zero vectors or truncate to maxlen
"""
new_data = []
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if len(sample) > maxlen:
temp = sample[:maxlen]
elif len(sample) < maxlen:
temp = list(sample)
additional_elems = maxlen - len(sample)
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data
def save(model, le, path, history):
"""
    Save the model structure, weights, label classes, and training history to path.
"""
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
print(f'saving model to {path}')
structure_file = os.path.join(path, 'structure.json')
weight_file = os.path.join(path, 'weight.h5')
labels_file = os.path.join(path, 'classes')
with open(structure_file, 'w') as json_file:
json_file.write(model.to_json())
model.save_weights(weight_file)
np.save(labels_file, le.categories_[0])
with open(os.path.join(path, 'log.json'), 'w') as f:
json.dump(history.history, f)
def load(path):
print(f'loading model from {path}')
structure_file = os.path.join(path, 'structure.json')
weight_file = os.path.join(path, 'weight.h5')
labels_file = os.path.join(path, 'classes.npy')
with open(structure_file, 'r') as json_file:
json_string = json_file.read()
model = model_from_json(json_string)
model.load_weights(weight_file)
model._make_predict_function()
categories = np.load(labels_file)
le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
le.fit([[c] for c in categories])
json_file.close()
return model, le
def predict(session, graph, model, vectorized_input, num_classes):
    if session is None:
        raise RuntimeError('Session is not initialized')
    if graph is None:
        raise RuntimeError('Graph is not initialized')
    if model is None:
        raise RuntimeError('Model is not initialized')
with session.as_default():
with graph.as_default():
probs = model.predict_proba(vectorized_input)
preds = model.predict_classes(vectorized_input)
preds = to_categorical(preds, num_classes=num_classes)
return probs, preds
class Model:
def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):
with open(config_path, 'r') as f:
self.model_cfg = yaml.safe_load(f)['model']
self.tokenizer = TreebankWordTokenizer()
with open(word2vec_pkl_path, 'rb') as f:
self.vectors = pickle.load(f)
self.model = None
self.session = None
self.graph = None
self.le_encoder = None
self.label_smoothing = label_smoothing
def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,
stratified_split: bool=False, early_stopping: bool=True):
"""
        Train a model on the dataset at tr_set_path.
        The file must be CSV or JSON lines with 'text' and 'intent' columns;
        each row pairs a training sentence with its class label.
Args:
tr_set_path: path to training data
save_path: path to save model weights and labels
va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True.
stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split.
early_stopping: whether to do early stopping
Returns:
history of training including average loss for each training epoch
"""
df_tr = read_csv_json(tr_set_path)
if stratified_split:
df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=
va_split, random_state=SEED))
df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]
va_messages, va_labels = list(df_va.text), list(df_va.intent)
va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for
i in range(len(df_va))]
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for
i in range(len(df_tr))]
x_train, y_train, le_encoder = self.__preprocess(tr_dataset)
x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)
else:
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for
i in range(len(df_tr))]
x_train, y_train, le_encoder = self.__preprocess(tr_dataset)
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
session.run(tf.global_variables_initializer())
model = self.__build_model(num_classes=len(le_encoder.
categories_[0]))
model.compile(loss=losses.CategoricalCrossentropy(
label_smoothing=self.label_smoothing), optimizer=self.
model_cfg.get('optimizer', 'adam'))
callback = tf.keras.callbacks.EarlyStopping(monitor=
'val_loss', min_delta=0, patience=5, verbose=0, mode=
'auto', baseline=None, restore_best_weights=True)
print('start training')
history = model.fit(x_train, y_train, batch_size=self.
model_cfg['batch_size'], epochs=100, validation_split=
va_split if not stratified_split else 0,
validation_data=(x_va, y_va) if stratified_split else
None, callbacks=[callback] if early_stopping else None)
history.history['train_data'] = tr_set_path
print(
f"finished training in {len(history.history['loss'])} epochs"
)
save(model, le_encoder, save_path, history)
self.model = model
self.session = session
self.graph = graph
self.le_encoder = le_encoder
return history.history
def __preprocess(self, dataset, le_encoder=None):
"""
        Preprocess the dataset and one-hot encode the categorical labels.
Get word embeddings for the training data.
"""
shuffle(dataset)
data = [s['data'] for s in dataset]
labels = [[s['label']] for s in dataset]
if le_encoder is None:
le_encoder = preprocessing.OneHotEncoder(handle_unknown=
'ignore', sparse=False)
le_encoder.fit(labels)
encoded_labels = le_encoder.transform(labels)
print('%s intents with %s samples' % (len(le_encoder.
get_feature_names()), len(data)))
print(le_encoder.categories_[0])
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.
vectors, data, self.model_cfg['embedding_dims'])
x_train = vectorized_data
y_train = encoded_labels
x_train = pad_trunc(x_train, self.model_cfg['maxlen'])
x_train = np.reshape(x_train, (len(x_train), self.model_cfg[
'maxlen'], self.model_cfg['embedding_dims']))
y_train = np.array(y_train)
return x_train, y_train, le_encoder
def __build_model(self, num_classes=2, type='keras'):
print('Build model')
model = Sequential()
layers = self.model_cfg.get('layers', 1)
for l in range(layers):
self.__addLayers(model, self.model_cfg)
model.add(Dense(num_classes))
model.add(Activation('softmax'))
return model
def __addLayers(self, model, model_cfg):
maxlen = model_cfg.get('maxlen', 400)
strides = model_cfg.get('strides', 1)
embedding_dims = model_cfg.get('embedding_dims', 300)
filters = model_cfg.get('filters', 250)
activation_type = model_cfg.get('activation', 'relu')
kernel_size = model_cfg.get('kernel_size', 3)
hidden_dims = model_cfg.get('hidden_dims', 200)
model.add(Conv1D(filters, kernel_size, padding='valid', activation=
activation_type, strides=strides, input_shape=(maxlen,
embedding_dims)))
model.add(GlobalMaxPooling1D())
model.add(Dense(hidden_dims))
model.add(Activation(activation_type))
def load(self, path):
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
self.session = session
self.graph = graph
model, le = load(path)
self.model = model
self.le_encoder = le
def predict(self, input: List[str]):
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.
vectors, input, self.model_cfg['embedding_dims'])
x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])
vectorized_input = np.reshape(x_train, (len(x_train), self.
model_cfg['maxlen'], self.model_cfg['embedding_dims']))
probs, preds = predict(self.session, self.graph, self.model,
vectorized_input, len(self.le_encoder.categories_[0]))
probs = probs.tolist()
results = self.le_encoder.inverse_transform(preds)
output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.
item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.
le_encoder.categories_[0], probs[i]))} for i, r in enumerate(
results)]
return output
<|reserved_special_token_1|>
from sklearn import preprocessing
from random import shuffle
import numpy as np
import collections
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, GlobalMaxPooling1D
from tensorflow.keras.models import Sequential, model_from_json
from tensorflow.keras import backend as K
from gensim.models.keyedvectors import KeyedVectors
from nltk.tokenize import TreebankWordTokenizer
import re
import pickle
import os
import yaml
import pandas
from typing import List
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import losses, optimizers
from early_stopping import EarlyStoppingAtMaxMacroF1
import json
import hashlib
SEED = 7
def read_csv_json(file_name) -> pandas.DataFrame:
if file_name.endswith('json') or file_name.endswith('jsonl'):
df = pandas.read_json(file_name, lines=True)
elif file_name.endswith('csv'):
df = pandas.read_csv(file_name)
else:
raise NotImplementedError
return df
def use_only_alphanumeric(input):
pattern = re.compile('[\W^\'\"]+')
output = pattern.sub(' ', input).strip()
return output
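# Quick illustration of the helper above (hypothetical inputs, not from the
# original module): runs of non-word characters collapse to single spaces and
# the result is stripped, e.g.
#   use_only_alphanumeric('Hello,   world!!')  ->  'Hello world'
#   use_only_alphanumeric("can't stop")        ->  'can t stop'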
def tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims):
vectorized_data = []
# probably could be optimized further
ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]
token_list = [tokenizer.tokenize(sample) for sample in ds1]
for tokens in token_list:
vecs = []
for token in tokens:
try:
vecs.append(embedding_vector[token].tolist())
except KeyError:
# print('token not found: (%s) in sentence: %s' % (token, ' '.join(tokens)))
np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(), 16) % (10 ** 6))
unk_vec = np.random.rand(embedding_dims)
vecs.append(unk_vec.tolist())
continue
vectorized_data.append(vecs)
return vectorized_data
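# Sketch of the out-of-vocabulary fallback above (hypothetical 3-dim vectors,
# not part of the original module): unknown tokens get a random vector seeded
# from the token's SHA-1 hash, so the same unknown word always maps to the
# same embedding across calls, e.g.
#   vecs = {'hello': np.zeros(3)}
#   tokenize_and_vectorize(TreebankWordTokenizer(), vecs, ['hello qwxz'], 3)
#   # -> one sentence of two 3-dim vectors; 'qwxz' is hash-seeded random.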
def pad_trunc(data, maxlen):
"""
For a given dataset pad with zero vectors or truncate to maxlen
"""
new_data = []
# Create a vector of 0s the length of our word vectors
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if len(sample) > maxlen:
temp = sample[:maxlen]
elif len(sample) < maxlen:
temp = list(sample)
# Append the appropriate number 0 vectors to the list
additional_elems = maxlen - len(sample)
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data
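# Worked example for pad_trunc (hypothetical values): with 2-dim word vectors
# and maxlen=3, a one-token sample is right-padded with zero vectors:
#   pad_trunc([[[1.0, 2.0]]], 3)
#   # -> [[[1.0, 2.0], [0.0, 0.0], [0.0, 0.0]]]
# and a longer sample would be truncated to its first 3 vectors.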
def save(model, le, path, history):
'''
    Save the model structure, weights, label classes, and training history to path.
'''
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
print(f'saving model to {path}')
structure_file = os.path.join(path, 'structure.json')
weight_file = os.path.join(path, 'weight.h5')
labels_file = os.path.join(path, 'classes')
with open(structure_file, "w") as json_file:
json_file.write(model.to_json())
model.save_weights(weight_file)
np.save(labels_file, le.categories_[0])
with open(os.path.join(path, "log.json"), 'w') as f:
json.dump(history.history, f)
def load(path):
print(f'loading model from {path}')
structure_file = os.path.join(path, 'structure.json')
weight_file = os.path.join(path, 'weight.h5')
labels_file = os.path.join(path, 'classes.npy')
with open(structure_file, "r") as json_file:
json_string = json_file.read()
model = model_from_json(json_string)
model.load_weights(weight_file)
model._make_predict_function()
#le = preprocessing.LabelEncoder()
categories = np.load(labels_file)
le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
le.fit([[c] for c in categories])
json_file.close()
return model, le
def predict(session, graph, model, vectorized_input, num_classes):
    if session is None:
        raise RuntimeError("Session is not initialized")
    if graph is None:
        raise RuntimeError("Graph is not initialized")
    if model is None:
        raise RuntimeError("Model is not initialized")
with session.as_default():
with graph.as_default():
probs = model.predict_proba(vectorized_input)
preds = model.predict_classes(vectorized_input)
preds = to_categorical(preds, num_classes=num_classes)
return (probs, preds)
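# Note (illustrative): for a batch of B inputs and C classes, `probs` has
# shape (B, C) with softmax probabilities, and `preds` is the one-hot argmax
# of shape (B, C).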
class Model:
def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):
with open(config_path, 'r') as f:
self.model_cfg = yaml.safe_load(f)['model']
self.tokenizer = TreebankWordTokenizer()
with open(word2vec_pkl_path, 'rb') as f:
self.vectors = pickle.load(f)
self.model = None
self.session = None
self.graph = None
self.le_encoder = None
self.label_smoothing = label_smoothing
def train(self, tr_set_path: str, save_path: str, va_split: float=0.1, stratified_split: bool=False, early_stopping: bool=True):
"""
        Train a model on the dataset at tr_set_path.
        The file must be CSV or JSON lines with 'text' and 'intent' columns;
        each row pairs a training sentence with its class label.
Args:
tr_set_path: path to training data
save_path: path to save model weights and labels
va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True.
stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split.
early_stopping: whether to do early stopping
Returns:
history of training including average loss for each training epoch
"""
df_tr = read_csv_json(tr_set_path)
if stratified_split:
df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=va_split, random_state=SEED))
df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]
va_messages, va_labels = list(df_va.text), list(df_va.intent)
va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for i in range(len(df_va))]
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]
(x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)
(x_va, y_va, _) = self.__preprocess(va_dataset, le_encoder)
else:
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]
(x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
session.run(tf.global_variables_initializer())
model = self.__build_model(num_classes=len(le_encoder.categories_[0]))
model.compile(
loss=losses.CategoricalCrossentropy(label_smoothing=self.label_smoothing),
#metrics=['categorical_accuracy'],
optimizer=self.model_cfg.get('optimizer', 'adam') #default lr at 0.001
#optimizer=optimizers.Adam(learning_rate=5e-4)
)
# early stopping callback using validation loss
callback = tf.keras.callbacks.EarlyStopping(
monitor="val_loss",
min_delta=0,
patience=5,
verbose=0,
mode="auto",
baseline=None,
restore_best_weights=True,
)
#callback = EarlyStoppingAtMaxMacroF1(
# patience=100, # record all epochs
# validation=(x_va, y_va)
#)
print('start training')
history = model.fit(x_train, y_train,
batch_size=self.model_cfg['batch_size'],
epochs=100,
validation_split=va_split if not stratified_split else 0,
validation_data=(x_va, y_va) if stratified_split else None,
callbacks=[callback] if early_stopping else None)
history.history['train_data'] = tr_set_path
print(f'finished training in {len(history.history["loss"])} epochs')
save(model, le_encoder, save_path, history)
self.model = model
self.session = session
self.graph = graph
self.le_encoder = le_encoder
# return training history
return history.history
def __preprocess(self, dataset, le_encoder=None):
'''
        Preprocess the dataset and one-hot encode the categorical labels.
Get word embeddings for the training data.
'''
shuffle(dataset)
data = [s['data'] for s in dataset]
#labels = [s['label'] for s in dataset]
labels = [[s['label']] for s in dataset]
#le_encoder = preprocessing.LabelEncoder()
if le_encoder is None:
le_encoder = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
le_encoder.fit(labels)
encoded_labels = le_encoder.transform(labels)
print('%s intents with %s samples' % (len(le_encoder.get_feature_names()), len(data)))
#print('train %s intents with %s samples' % (len(set(labels)), len(data)))
#print(collections.Counter(labels))
print(le_encoder.categories_[0])
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, data, self.model_cfg['embedding_dims'])
# split_point = int(len(vectorized_data) * .9)
x_train = vectorized_data # vectorized_data[:split_point]
y_train = encoded_labels # encoded_labels[:split_point]
x_train = pad_trunc(x_train, self.model_cfg['maxlen'])
x_train = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))
y_train = np.array(y_train)
return x_train, y_train, le_encoder
def __build_model(self, num_classes=2, type='keras'):
print('Build model')
model = Sequential()
layers = self.model_cfg.get('layers', 1)
for l in range(layers):
self.__addLayers(model, self.model_cfg)
model.add(Dense(num_classes))
model.add(Activation('softmax'))
return model
def __addLayers(self, model, model_cfg):
maxlen = model_cfg.get('maxlen', 400)
strides = model_cfg.get('strides', 1)
embedding_dims = model_cfg.get('embedding_dims', 300)
filters = model_cfg.get('filters', 250)
activation_type = model_cfg.get('activation', 'relu')
kernel_size = model_cfg.get('kernel_size', 3)
hidden_dims = model_cfg.get('hidden_dims', 200)
model.add(Conv1D(
filters,
kernel_size,
padding='valid',
activation=activation_type,
strides=strides,
input_shape=(maxlen, embedding_dims)))
model.add(GlobalMaxPooling1D())
model.add(Dense(hidden_dims))
model.add(Activation(activation_type))
def load(self, path):
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
self.session = session
self.graph = graph
(model, le) = load(path)
self.model = model
self.le_encoder = le
def predict(self, input: List[str]):
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, input, self.model_cfg['embedding_dims'])
x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])
vectorized_input = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))
(probs, preds) = predict(self.session, self.graph, self.model, vectorized_input, len(self.le_encoder.categories_[0]))
probs = probs.tolist()
results = self.le_encoder.inverse_transform(preds)
output = [{'input': input[i],
'embeddings': x_train[i],
#'label': r,
'label': r.item(),
'highestProb': max(probs[i]),
#'prob': dict(zip(self.le_encoder.classes_, probs[i]))
'prob': dict(zip(self.le_encoder.categories_[0], probs[i]))
} for i, r in enumerate(results)]
return output
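# Hedged usage sketch (not part of the original module): the file paths below
# ('w2v.pkl', 'model.yaml', 'train.csv', 'saved_model/') are placeholders and
# assume a pickled gensim KeyedVectors mapping, a YAML config with a top-level
# 'model' section (maxlen, embedding_dims, batch_size, ...), and a CSV with
# 'text' and 'intent' columns.
if __name__ == '__main__':
    clf = Model('w2v.pkl', 'model.yaml', label_smoothing=0.1)
    history = clf.train('train.csv', 'saved_model/', va_split=0.1,
                        stratified_split=True, early_stopping=True)
    # Reload the saved artefacts and classify new messages.
    clf.load('saved_model/')
    for result in clf.predict(['where is my order?']):
        print(result['label'], result['highestProb'])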
|
flexible
|
{
"blob_id": "23f491bbf26ede9052ecdab04b8c00cc78db5a7e",
"index": 8831,
"step-1": "<mask token>\n\n\ndef read_csv_json(file_name) ->pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\n<mask token>\n\n\nclass Model:\n\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,\n stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. \n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=\n va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for\n i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.\n categories_[0]))\n model.compile(loss=losses.CategoricalCrossentropy(\n label_smoothing=self.label_smoothing), optimizer=self.\n model_cfg.get('optimizer', 'adam'))\n callback = tf.keras.callbacks.EarlyStopping(monitor=\n 'val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=True)\n print('start training')\n history = model.fit(x_train, y_train, batch_size=self.\n model_cfg['batch_size'], epochs=100, validation_split=\n va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else\n None, callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(\n f\"finished training in {len(history.history['loss'])} epochs\"\n )\n save(model, le_encoder, save_path, 
history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n return history.history\n\n def __preprocess(self, dataset, le_encoder=None):\n \"\"\"\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n \"\"\"\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n if le_encoder is None:\n le_encoder = preprocessing.OneHotEncoder(handle_unknown=\n 'ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.\n get_feature_names()), len(data)))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, data, self.model_cfg['embedding_dims'])\n x_train = vectorized_data\n y_train = encoded_labels\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg[\n 'maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n model.add(Conv1D(filters, kernel_size, padding='valid', activation=\n activation_type, strides=strides, input_shape=(maxlen,\n embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n model, le = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.\n model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n probs, preds = predict(self.session, self.graph, self.model,\n vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.\n item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.\n le_encoder.categories_[0], probs[i]))} for i, r in enumerate(\n results)]\n return output\n",
"step-2": "<mask token>\n\n\ndef read_csv_json(file_name) ->pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\n<mask token>\n\n\ndef pad_trunc(data, maxlen):\n \"\"\"\n For a given dataset pad with zero vectors or truncate to maxlen\n \"\"\"\n new_data = []\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\ndef save(model, le, path, history):\n \"\"\"\n save model based on model, encoder\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print(f'saving model to {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes')\n with open(structure_file, 'w') as json_file:\n json_file.write(model.to_json())\n model.save_weights(weight_file)\n np.save(labels_file, le.categories_[0])\n with open(os.path.join(path, 'log.json'), 'w') as f:\n json.dump(history.history, f)\n\n\ndef load(path):\n print(f'loading model from {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes.npy')\n with open(structure_file, 'r') as json_file:\n json_string = json_file.read()\n model = model_from_json(json_string)\n model.load_weights(weight_file)\n model._make_predict_function()\n categories = np.load(labels_file)\n le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le.fit([[c] for c in categories])\n json_file.close()\n return model, le\n\n\ndef predict(session, graph, model, vectorized_input, num_classes):\n if session is None:\n raise 'Session is not initialized'\n if graph is None:\n raise 'Graph is not initialized'\n if model is None:\n raise 'Model is not initialized'\n with session.as_default():\n with graph.as_default():\n probs = model.predict_proba(vectorized_input)\n preds = model.predict_classes(vectorized_input)\n preds = to_categorical(preds, num_classes=num_classes)\n return probs, preds\n\n\nclass Model:\n\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,\n stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. 
If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. \n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=\n va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for\n i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.\n categories_[0]))\n model.compile(loss=losses.CategoricalCrossentropy(\n label_smoothing=self.label_smoothing), optimizer=self.\n model_cfg.get('optimizer', 'adam'))\n callback = tf.keras.callbacks.EarlyStopping(monitor=\n 'val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=True)\n print('start training')\n history = model.fit(x_train, y_train, batch_size=self.\n model_cfg['batch_size'], epochs=100, validation_split=\n va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else\n None, callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(\n f\"finished training in {len(history.history['loss'])} epochs\"\n )\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n return history.history\n\n def __preprocess(self, dataset, le_encoder=None):\n \"\"\"\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n \"\"\"\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n if le_encoder is None:\n le_encoder = preprocessing.OneHotEncoder(handle_unknown=\n 'ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.\n get_feature_names()), len(data)))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, data, self.model_cfg['embedding_dims'])\n x_train = vectorized_data\n y_train = encoded_labels\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg[\n 'maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n 
model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n model.add(Conv1D(filters, kernel_size, padding='valid', activation=\n activation_type, strides=strides, input_shape=(maxlen,\n embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n model, le = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.\n model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n probs, preds = predict(self.session, self.graph, self.model,\n vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.\n item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.\n le_encoder.categories_[0], probs[i]))} for i, r in enumerate(\n results)]\n return output\n",
"step-3": "<mask token>\ntf.disable_v2_behavior()\n<mask token>\n\n\ndef read_csv_json(file_name) ->pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\ndef use_only_alphanumeric(input):\n pattern = re.compile('[\\\\W^\\'\"]+')\n output = pattern.sub(' ', input).strip()\n return output\n\n\ndef tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims\n ):\n vectorized_data = []\n ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]\n token_list = [tokenizer.tokenize(sample) for sample in ds1]\n for tokens in token_list:\n vecs = []\n for token in tokens:\n try:\n vecs.append(embedding_vector[token].tolist())\n except KeyError:\n np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(),\n 16) % 10 ** 6)\n unk_vec = np.random.rand(embedding_dims)\n vecs.append(unk_vec.tolist())\n continue\n vectorized_data.append(vecs)\n return vectorized_data\n\n\ndef pad_trunc(data, maxlen):\n \"\"\"\n For a given dataset pad with zero vectors or truncate to maxlen\n \"\"\"\n new_data = []\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\ndef save(model, le, path, history):\n \"\"\"\n save model based on model, encoder\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print(f'saving model to {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes')\n with open(structure_file, 'w') as json_file:\n json_file.write(model.to_json())\n model.save_weights(weight_file)\n np.save(labels_file, le.categories_[0])\n with open(os.path.join(path, 'log.json'), 'w') as f:\n json.dump(history.history, f)\n\n\ndef load(path):\n print(f'loading model from {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes.npy')\n with open(structure_file, 'r') as json_file:\n json_string = json_file.read()\n model = model_from_json(json_string)\n model.load_weights(weight_file)\n model._make_predict_function()\n categories = np.load(labels_file)\n le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le.fit([[c] for c in categories])\n json_file.close()\n return model, le\n\n\ndef predict(session, graph, model, vectorized_input, num_classes):\n if session is None:\n raise 'Session is not initialized'\n if graph is None:\n raise 'Graph is not initialized'\n if model is None:\n raise 'Model is not initialized'\n with session.as_default():\n with graph.as_default():\n probs = model.predict_proba(vectorized_input)\n preds = model.predict_classes(vectorized_input)\n preds = to_categorical(preds, num_classes=num_classes)\n return probs, preds\n\n\nclass Model:\n\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = 
None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,\n stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. \n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=\n va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for\n i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.\n categories_[0]))\n model.compile(loss=losses.CategoricalCrossentropy(\n label_smoothing=self.label_smoothing), optimizer=self.\n model_cfg.get('optimizer', 'adam'))\n callback = tf.keras.callbacks.EarlyStopping(monitor=\n 'val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=True)\n print('start training')\n history = model.fit(x_train, y_train, batch_size=self.\n model_cfg['batch_size'], epochs=100, validation_split=\n va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else\n None, callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(\n f\"finished training in {len(history.history['loss'])} epochs\"\n )\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n return history.history\n\n def __preprocess(self, dataset, le_encoder=None):\n \"\"\"\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n \"\"\"\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n if le_encoder is None:\n le_encoder = preprocessing.OneHotEncoder(handle_unknown=\n 'ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s 
samples' % (len(le_encoder.\n get_feature_names()), len(data)))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, data, self.model_cfg['embedding_dims'])\n x_train = vectorized_data\n y_train = encoded_labels\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg[\n 'maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n model.add(Conv1D(filters, kernel_size, padding='valid', activation=\n activation_type, strides=strides, input_shape=(maxlen,\n embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n model, le = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.\n model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n probs, preds = predict(self.session, self.graph, self.model,\n vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.\n item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.\n le_encoder.categories_[0], probs[i]))} for i, r in enumerate(\n results)]\n return output\n",
"step-4": "from sklearn import preprocessing\nfrom random import shuffle\nimport numpy as np\nimport collections\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, GlobalMaxPooling1D\nfrom tensorflow.keras.models import Sequential, model_from_json\nfrom tensorflow.keras import backend as K\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom nltk.tokenize import TreebankWordTokenizer\nimport re\nimport pickle\nimport os\nimport yaml\nimport pandas\nfrom typing import List\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras import losses, optimizers\nfrom early_stopping import EarlyStoppingAtMaxMacroF1\nimport json\nimport hashlib\nSEED = 7\n\n\ndef read_csv_json(file_name) ->pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\ndef use_only_alphanumeric(input):\n pattern = re.compile('[\\\\W^\\'\"]+')\n output = pattern.sub(' ', input).strip()\n return output\n\n\ndef tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims\n ):\n vectorized_data = []\n ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]\n token_list = [tokenizer.tokenize(sample) for sample in ds1]\n for tokens in token_list:\n vecs = []\n for token in tokens:\n try:\n vecs.append(embedding_vector[token].tolist())\n except KeyError:\n np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(),\n 16) % 10 ** 6)\n unk_vec = np.random.rand(embedding_dims)\n vecs.append(unk_vec.tolist())\n continue\n vectorized_data.append(vecs)\n return vectorized_data\n\n\ndef pad_trunc(data, maxlen):\n \"\"\"\n For a given dataset pad with zero vectors or truncate to maxlen\n \"\"\"\n new_data = []\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\ndef save(model, le, path, history):\n \"\"\"\n save model based on model, encoder\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print(f'saving model to {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes')\n with open(structure_file, 'w') as json_file:\n json_file.write(model.to_json())\n model.save_weights(weight_file)\n np.save(labels_file, le.categories_[0])\n with open(os.path.join(path, 'log.json'), 'w') as f:\n json.dump(history.history, f)\n\n\ndef load(path):\n print(f'loading model from {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes.npy')\n with open(structure_file, 'r') as json_file:\n json_string = json_file.read()\n model = model_from_json(json_string)\n model.load_weights(weight_file)\n model._make_predict_function()\n categories = np.load(labels_file)\n le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le.fit([[c] for c in categories])\n json_file.close()\n return model, le\n\n\ndef predict(session, graph, model, vectorized_input, num_classes):\n if session is 
None:\n raise 'Session is not initialized'\n if graph is None:\n raise 'Graph is not initialized'\n if model is None:\n raise 'Model is not initialized'\n with session.as_default():\n with graph.as_default():\n probs = model.predict_proba(vectorized_input)\n preds = model.predict_classes(vectorized_input)\n preds = to_categorical(preds, num_classes=num_classes)\n return probs, preds\n\n\nclass Model:\n\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1,\n stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. \n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=\n va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for\n i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n x_va, y_va, _ = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for\n i in range(len(df_tr))]\n x_train, y_train, le_encoder = self.__preprocess(tr_dataset)\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.\n categories_[0]))\n model.compile(loss=losses.CategoricalCrossentropy(\n label_smoothing=self.label_smoothing), optimizer=self.\n model_cfg.get('optimizer', 'adam'))\n callback = tf.keras.callbacks.EarlyStopping(monitor=\n 'val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=True)\n print('start training')\n history = model.fit(x_train, y_train, batch_size=self.\n model_cfg['batch_size'], epochs=100, validation_split=\n va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else\n None, callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(\n f\"finished training in 
{len(history.history['loss'])} epochs\"\n )\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n return history.history\n\n def __preprocess(self, dataset, le_encoder=None):\n \"\"\"\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n \"\"\"\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n if le_encoder is None:\n le_encoder = preprocessing.OneHotEncoder(handle_unknown=\n 'ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.\n get_feature_names()), len(data)))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, data, self.model_cfg['embedding_dims'])\n x_train = vectorized_data\n y_train = encoded_labels\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg[\n 'maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n model.add(Conv1D(filters, kernel_size, padding='valid', activation=\n activation_type, strides=strides, input_shape=(maxlen,\n embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n model, le = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.\n vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.\n model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n probs, preds = predict(self.session, self.graph, self.model,\n vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i], 'embeddings': x_train[i], 'label': r.\n item(), 'highestProb': max(probs[i]), 'prob': dict(zip(self.\n le_encoder.categories_[0], probs[i]))} for i, r in enumerate(\n results)]\n return output\n",
"step-5": "from sklearn import preprocessing\nfrom random import shuffle\nimport numpy as np\nimport collections\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, GlobalMaxPooling1D\nfrom tensorflow.keras.models import Sequential, model_from_json\nfrom tensorflow.keras import backend as K\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom nltk.tokenize import TreebankWordTokenizer\nimport re\nimport pickle\nimport os\n\nimport yaml\nimport pandas\nfrom typing import List\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras import losses, optimizers\nfrom early_stopping import EarlyStoppingAtMaxMacroF1\nimport json\nimport hashlib\n\nSEED = 7\n\n\ndef read_csv_json(file_name) -> pandas.DataFrame:\n if file_name.endswith('json') or file_name.endswith('jsonl'):\n df = pandas.read_json(file_name, lines=True)\n elif file_name.endswith('csv'):\n df = pandas.read_csv(file_name)\n else:\n raise NotImplementedError\n return df\n\n\ndef use_only_alphanumeric(input):\n pattern = re.compile('[\\W^\\'\\\"]+')\n output = pattern.sub(' ', input).strip()\n return output\n\n\ndef tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims):\n vectorized_data = []\n # probably could be optimized further\n ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]\n token_list = [tokenizer.tokenize(sample) for sample in ds1]\n\n for tokens in token_list:\n vecs = []\n for token in tokens:\n try:\n vecs.append(embedding_vector[token].tolist())\n except KeyError:\n # print('token not found: (%s) in sentence: %s' % (token, ' '.join(tokens)))\n np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(), 16) % (10 ** 6))\n unk_vec = np.random.rand(embedding_dims)\n vecs.append(unk_vec.tolist())\n continue\n vectorized_data.append(vecs)\n return vectorized_data\n\n\ndef pad_trunc(data, maxlen):\n \"\"\"\n For a given dataset pad with zero vectors or truncate to maxlen\n \"\"\"\n new_data = []\n # Create a vector of 0s the length of our word vectors\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n # Append the appropriate number 0 vectors to the list\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data\n\n\ndef save(model, le, path, history):\n '''\n save model based on model, encoder\n '''\n\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print(f'saving model to {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes')\n with open(structure_file, \"w\") as json_file:\n json_file.write(model.to_json())\n model.save_weights(weight_file)\n np.save(labels_file, le.categories_[0])\n with open(os.path.join(path, \"log.json\"), 'w') as f:\n json.dump(history.history, f)\n\n\ndef load(path):\n print(f'loading model from {path}')\n structure_file = os.path.join(path, 'structure.json')\n weight_file = os.path.join(path, 'weight.h5')\n labels_file = os.path.join(path, 'classes.npy')\n with open(structure_file, \"r\") as json_file:\n json_string = json_file.read()\n model = model_from_json(json_string)\n model.load_weights(weight_file)\n model._make_predict_function()\n #le = 
preprocessing.LabelEncoder()\n categories = np.load(labels_file)\n le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le.fit([[c] for c in categories])\n json_file.close()\n return model, le\n\n\ndef predict(session, graph, model, vectorized_input, num_classes):\n if session is None:\n raise (\"Session is not initialized\")\n if graph is None:\n raise (\"Graph is not initialized\")\n if model is None:\n raise (\"Model is not initialized\")\n with session.as_default():\n with graph.as_default():\n probs = model.predict_proba(vectorized_input)\n preds = model.predict_classes(vectorized_input)\n preds = to_categorical(preds, num_classes=num_classes)\n return (probs, preds)\n\n\nclass Model:\n def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):\n with open(config_path, 'r') as f:\n self.model_cfg = yaml.safe_load(f)['model']\n self.tokenizer = TreebankWordTokenizer()\n\n with open(word2vec_pkl_path, 'rb') as f:\n self.vectors = pickle.load(f)\n self.model = None\n self.session = None\n self.graph = None\n self.le_encoder = None\n self.label_smoothing = label_smoothing\n\n def train(self, tr_set_path: str, save_path: str, va_split: float=0.1, stratified_split: bool=False, early_stopping: bool=True):\n \"\"\"\n Train a model for a given dataset\n Dataset should be a list of tuples consisting of\n training sentence and the class label\n Args:\n tr_set_path: path to training data\n save_path: path to save model weights and labels\n va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True. \n stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split. 
\n early_stopping: whether to do early stopping\n Returns: \n history of training including average loss for each training epoch\n \n \"\"\"\n df_tr = read_csv_json(tr_set_path)\n if stratified_split:\n df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=va_split, random_state=SEED))\n df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]\n va_messages, va_labels = list(df_va.text), list(df_va.intent)\n va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for i in range(len(df_va))]\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]\n (x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)\n (x_va, y_va, _) = self.__preprocess(va_dataset, le_encoder)\n else:\n tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)\n tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]\n (x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)\n\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n session.run(tf.global_variables_initializer())\n model = self.__build_model(num_classes=len(le_encoder.categories_[0]))\n model.compile(\n loss=losses.CategoricalCrossentropy(label_smoothing=self.label_smoothing),\n #metrics=['categorical_accuracy'],\n optimizer=self.model_cfg.get('optimizer', 'adam') #default lr at 0.001\n #optimizer=optimizers.Adam(learning_rate=5e-4)\n )\n # early stopping callback using validation loss \n callback = tf.keras.callbacks.EarlyStopping(\n monitor=\"val_loss\",\n min_delta=0,\n patience=5,\n verbose=0,\n mode=\"auto\",\n baseline=None,\n restore_best_weights=True,\n )\n #callback = EarlyStoppingAtMaxMacroF1(\n # patience=100, # record all epochs\n # validation=(x_va, y_va)\n #)\n\n print('start training')\n history = model.fit(x_train, y_train,\n batch_size=self.model_cfg['batch_size'],\n epochs=100,\n validation_split=va_split if not stratified_split else 0,\n validation_data=(x_va, y_va) if stratified_split else None,\n callbacks=[callback] if early_stopping else None)\n history.history['train_data'] = tr_set_path\n print(f'finished training in {len(history.history[\"loss\"])} epochs')\n save(model, le_encoder, save_path, history)\n self.model = model\n self.session = session\n self.graph = graph\n self.le_encoder = le_encoder\n # return training history \n return history.history\n \n def __preprocess(self, dataset, le_encoder=None):\n '''\n Preprocess the dataset, transform the categorical labels into numbers.\n Get word embeddings for the training data.\n '''\n shuffle(dataset)\n data = [s['data'] for s in dataset]\n #labels = [s['label'] for s in dataset]\n labels = [[s['label']] for s in dataset]\n #le_encoder = preprocessing.LabelEncoder()\n if le_encoder is None: \n le_encoder = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)\n le_encoder.fit(labels)\n encoded_labels = le_encoder.transform(labels)\n print('%s intents with %s samples' % (len(le_encoder.get_feature_names()), len(data)))\n #print('train %s intents with %s samples' % (len(set(labels)), len(data)))\n #print(collections.Counter(labels))\n print(le_encoder.categories_[0])\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, data, self.model_cfg['embedding_dims'])\n\n # split_point = int(len(vectorized_data) * .9)\n x_train = vectorized_data # vectorized_data[:split_point]\n y_train = encoded_labels # 
encoded_labels[:split_point]\n\n x_train = pad_trunc(x_train, self.model_cfg['maxlen'])\n\n x_train = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n y_train = np.array(y_train)\n return x_train, y_train, le_encoder\n\n def __build_model(self, num_classes=2, type='keras'):\n print('Build model')\n model = Sequential()\n layers = self.model_cfg.get('layers', 1)\n for l in range(layers):\n self.__addLayers(model, self.model_cfg)\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n\n return model\n\n def __addLayers(self, model, model_cfg):\n maxlen = model_cfg.get('maxlen', 400)\n strides = model_cfg.get('strides', 1)\n embedding_dims = model_cfg.get('embedding_dims', 300)\n filters = model_cfg.get('filters', 250)\n activation_type = model_cfg.get('activation', 'relu')\n kernel_size = model_cfg.get('kernel_size', 3)\n hidden_dims = model_cfg.get('hidden_dims', 200)\n\n model.add(Conv1D(\n filters,\n kernel_size,\n padding='valid',\n activation=activation_type,\n strides=strides,\n input_shape=(maxlen, embedding_dims)))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Activation(activation_type))\n\n def load(self, path):\n K.clear_session()\n graph = tf.Graph()\n with graph.as_default():\n session = tf.Session()\n with session.as_default():\n self.session = session\n self.graph = graph\n (model, le) = load(path)\n self.model = model\n self.le_encoder = le\n\n def predict(self, input: List[str]):\n vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, input, self.model_cfg['embedding_dims'])\n x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])\n vectorized_input = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))\n (probs, preds) = predict(self.session, self.graph, self.model, vectorized_input, len(self.le_encoder.categories_[0]))\n probs = probs.tolist()\n results = self.le_encoder.inverse_transform(preds)\n output = [{'input': input[i],\n 'embeddings': x_train[i],\n #'label': r,\n 'label': r.item(),\n 'highestProb': max(probs[i]),\n #'prob': dict(zip(self.le_encoder.classes_, probs[i]))\n 'prob': dict(zip(self.le_encoder.categories_[0], probs[i]))\n } for i, r in enumerate(results)]\n return output\n",
"step-ids": [
9,
13,
16,
18,
19
]
}
|
[
9,
13,
16,
18,
19
] |
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey, Float
from sqlalchemy.orm import relationship, backref
ORMBase = declarative_base()
def create_all(engine):
ORMBase.metadata.create_all(engine)
|
normal
|
{
"blob_id": "c7ca8235864ce5de188c4aa2feb9ad82d4fa9b0f",
"index": 4023,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_all(engine):\n ORMBase.metadata.create_all(engine)\n",
"step-3": "<mask token>\nORMBase = declarative_base()\n\n\ndef create_all(engine):\n ORMBase.metadata.create_all(engine)\n",
"step-4": "from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, ForeignKey, Float\nfrom sqlalchemy.orm import relationship, backref\nORMBase = declarative_base()\n\n\ndef create_all(engine):\n ORMBase.metadata.create_all(engine)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_rect():
rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{'name':
'test'}])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 100
assert rect.intersection(test) == 25
assert rect.iou(test) == 25 / 100.0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_rect():
rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{'name':
'test'}])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 100
assert rect.intersection(test) == 25
assert rect.iou(test) == 25 / 100.0
def test_rect():
rect = Rectangle([[0, 0], [0, 0]])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 0
assert rect.intersection(test) == 0
assert rect.iou(test) == 0
<|reserved_special_token_1|>
from whylogs.core.annotation_profiling import Rectangle
def test_rect():
rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{'name':
'test'}])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 100
assert rect.intersection(test) == 25
assert rect.iou(test) == 25 / 100.0
def test_rect():
rect = Rectangle([[0, 0], [0, 0]])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 0
assert rect.intersection(test) == 0
assert rect.iou(test) == 0
<|reserved_special_token_1|>
from whylogs.core.annotation_profiling import Rectangle
def test_rect():
rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{"name": "test"}])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 100
assert rect.intersection(test) == 25
assert rect.iou(test) == 25 / 100.0
def test_rect():
rect = Rectangle([[0, 0], [0, 0]])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 0
assert rect.intersection(test) == 0
assert rect.iou(test) == 0
|
flexible
|
{
"blob_id": "b65d25198d55ab4a859b9718b7b225fa92c13a2b",
"index": 1202,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_rect():\n rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{'name':\n 'test'}])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 100\n assert rect.intersection(test) == 25\n assert rect.iou(test) == 25 / 100.0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_rect():\n rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{'name':\n 'test'}])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 100\n assert rect.intersection(test) == 25\n assert rect.iou(test) == 25 / 100.0\n\n\ndef test_rect():\n rect = Rectangle([[0, 0], [0, 0]])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 0\n assert rect.intersection(test) == 0\n assert rect.iou(test) == 0\n",
"step-4": "from whylogs.core.annotation_profiling import Rectangle\n\n\ndef test_rect():\n rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{'name':\n 'test'}])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 100\n assert rect.intersection(test) == 25\n assert rect.iou(test) == 25 / 100.0\n\n\ndef test_rect():\n rect = Rectangle([[0, 0], [0, 0]])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 0\n assert rect.intersection(test) == 0\n assert rect.iou(test) == 0\n",
"step-5": "from whylogs.core.annotation_profiling import Rectangle\n\n\ndef test_rect():\n\n rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{\"name\": \"test\"}])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 100\n assert rect.intersection(test) == 25\n assert rect.iou(test) == 25 / 100.0\n\n\ndef test_rect():\n\n rect = Rectangle([[0, 0], [0, 0]])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 0\n assert rect.intersection(test) == 0\n assert rect.iou(test) == 0\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
opsi-utils
Test utilities
"""
import os
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Generator
@contextmanager
def temp_context() -> Generator[Path, None, None]:
origin = Path().absolute()
try:
with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tempdir:
os.chdir(tempdir)
yield origin # return original path
finally:
os.chdir(origin)
|
normal
|
{
"blob_id": "3c2a611fd001f145703853f5ecfe70d0e93844e4",
"index": 4665,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@contextmanager\ndef temp_context() ->Generator[Path, None, None]:\n origin = Path().absolute()\n try:\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True\n ) as tempdir:\n os.chdir(tempdir)\n yield origin\n finally:\n os.chdir(origin)\n",
"step-3": "<mask token>\nimport os\nimport tempfile\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Generator\n\n\n@contextmanager\ndef temp_context() ->Generator[Path, None, None]:\n origin = Path().absolute()\n try:\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True\n ) as tempdir:\n os.chdir(tempdir)\n yield origin\n finally:\n os.chdir(origin)\n",
"step-4": "\"\"\"\nopsi-utils\n\nTest utilities\n\"\"\"\n\nimport os\nimport tempfile\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Generator\n\n\n@contextmanager\ndef temp_context() -> Generator[Path, None, None]:\n\torigin = Path().absolute()\n\ttry:\n\t\twith tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tempdir:\n\t\t\tos.chdir(tempdir)\n\t\t\tyield origin # return original path\n\tfinally:\n\t\tos.chdir(origin)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def df_to_sql_T_1(filefullpath, sheet, row_name):
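    # Load the given sheet, transpose it so the row named by row_name becomes the
    # header, and de-duplicate on the full organisation name (★机构全名).
    # Organisations already present in org_info are updated in place by org_id;
    # unknown ones are appended with a newly generated zero-padded 'O' id.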
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how='all')
excel_df = excel_df.dropna(axis=1, how='all')
excel_df = excel_df.T
excel_df.columns = excel_df.loc[row_name]
excel_df = excel_df.drop(row_name, axis=0, inplace=False)
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★机构全名'], inplace=True)
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3')
sql = 'SELECT * FROM org_info'
sql_df = pd.read_sql(sql, con)
fund_name_list = sql_df['org_full_name'].tolist()
sql_number = len(fund_name_list)
org_id_number = 0
for org_full_name in sql_df['org_full_name'].unique():
org_id_number = org_id_number + 1
org_id = 'O' + '0' * (5 - len(str(org_id_number))) + str(org_id_number)
with con:
cur = con.cursor()
cur.execute('UPDATE org_info SET org_id=? WHERE org_full_name=?',
(org_id, org_full_name))
excel_name_list = excel_df['★机构全名'].tolist()
for name in excel_name_list:
if name in fund_name_list:
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3'
)
sql = 'SELECT * FROM org_info'
sql_df = pd.read_sql(sql, con)
name_dataframe = sql_df[sql_df['org_full_name'] == name]
org_id = name_dataframe.loc[name_dataframe.last_valid_index(),
'org_id']
commit_data = excel_df[excel_df['★机构全名'] == name]
commit_data.columns = ['org_name', 'org_full_name', 'reg_code',
'reg_time', 'found_date', 'reg_capital', 'real_capital',
'region', 'profile', 'address', 'team', 'fund_num',
'is_qualification', 'prize', 'team_scale',
'investment_idea', 'master_strategy', 'remark',
'asset_mgt_scale', 'linkman', 'linkman_duty',
'linkman_phone', 'linkman_email']
commit_data['org_id'] = str(org_id)
org_name = str(commit_data.loc[commit_data.org_full_name ==
name, 'org_name'].values[0])
org_full_name = str(name)
reg_code = str(commit_data.loc[commit_data.org_full_name ==
name, 'reg_code'].values[0])
reg_time = str(commit_data.loc[commit_data.org_full_name ==
name, 'reg_time'].values[0])
found_date = str(commit_data.loc[commit_data.org_full_name ==
name, 'found_date'].values[0])
reg_capital = str(commit_data.loc[commit_data.org_full_name ==
name, 'reg_capital'].values[0])
real_capital = str(commit_data.loc[commit_data.org_full_name ==
name, 'real_capital'].values[0])
region = str(commit_data.loc[commit_data.org_full_name == name,
'region'].values[0])
profile = str(commit_data.loc[commit_data.org_full_name == name,
'profile'].values[0])
address = str(commit_data.loc[commit_data.org_full_name == name,
'address'].values[0])
            team = str(commit_data.loc[commit_data.org_full_name == name,
                'team'].values[0])
            fund_num = str(commit_data.loc[commit_data.org_full_name ==
                name, 'fund_num'].values[0])
is_qualification = str(commit_data.loc[commit_data.
org_full_name == name, 'is_qualification'].values[0])
prize = str(commit_data.loc[commit_data.org_full_name == name,
'prize'].values[0])
            team_scale = str(commit_data.loc[commit_data.org_full_name ==
                name, 'team_scale'].values[0])
investment_idea = str(commit_data.loc[commit_data.org_full_name ==
name, 'investment_idea'].values[0])
master_strategy = str(commit_data.loc[commit_data.org_full_name ==
name, 'master_strategy'].values[0])
remark = str(commit_data.loc[commit_data.org_full_name == name,
'remark'].values[0])
asset_mgt_scale = str(commit_data.loc[commit_data.org_full_name ==
name, 'asset_mgt_scale'].values[0])
linkman = str(commit_data.loc[commit_data.org_full_name == name,
'linkman'].values[0])
linkman_duty = str(commit_data.loc[commit_data.org_full_name ==
name, 'linkman_duty'].values[0])
linkman_phone = str(commit_data.loc[commit_data.org_full_name ==
name, 'linkman_phone'].values[0])
linkman_email = str(commit_data.loc[commit_data.org_full_name ==
name, 'linkman_email'].values[0])
with con:
cur = con.cursor()
sql = (
'UPDATE org_info SET org_name=?, org_full_name=?, reg_code=?, reg_time=?, found_date=?, reg_capital=?, real_capital=?, region=?,profile=?, address=?, team=?, fund_num=?, is_qualification=?, prize=?, team_scale=?, investment_idea=?, master_strategy=?, remark=?, asset_mgt_scale=?, linkman=?, linkman_duty=?, linkman_phone=?, linkman_email=? WHERE org_id=?'
)
l = (org_name, org_full_name, reg_code, reg_time,
found_date, reg_capital, real_capital, region, profile,
address, team, fund_num, is_qualification, prize,
team_scale, investment_idea, master_strategy, remark,
asset_mgt_scale, linkman, linkman_duty, linkman_phone,
linkman_email, org_id)
cur.execute(sql, l)
print('if')
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df['★机构全名'] == name]
commit_data.columns = ['org_name', 'org_full_name', 'reg_code',
'reg_time', 'found_date', 'reg_capital', 'real_capital',
'region', 'profile', 'address', 'team', 'fund_num',
'is_qualification', 'prize', 'team_scale',
'investment_idea', 'master_strategy', 'remark',
'asset_mgt_scale', 'linkman', 'linkman_duty',
'linkman_phone', 'linkman_email']
commit_data.loc[:, 'org_id'] = 'O' + '0' * (5 - len(str(
sql_number))) + str(sql_number)
commit_data.to_sql('org_info', con, if_exists='append', index=False
)
print('else')
def df_to_sql_T_2(filefullpath, sheet, row_name):
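    # Same pattern as df_to_sql_T_1, but for the fund sheet: rows are keyed on the
    # full fund name (★基金全称) and written to fund_info with zero-padded 'F' ids,
    # updating existing funds in place and appending new ones.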
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how='all')
excel_df = excel_df.dropna(axis=1, how='all')
excel_df = excel_df.T
excel_df.columns = excel_df.loc[row_name]
excel_df = excel_df.drop(row_name, axis=0, inplace=False)
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★基金全称'], inplace=True)
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3')
sql = 'SELECT * FROM fund_info'
sql_df = pd.read_sql(sql, con)
fund_name_list = sql_df['fund_full_name'].tolist()
sql_number = len(fund_name_list)
fund_id_number = 0
for fund_full_name in sql_df['fund_full_name'].unique():
fund_id_number = fund_id_number + 1
fund_id = 'F' + '0' * (6 - len(str(fund_id_number))) + str(
fund_id_number)
with con:
cur = con.cursor()
cur.execute('UPDATE fund_info SET fund_id=? WHERE fund_full_name=?'
, (fund_id, fund_full_name))
excel_name_list = excel_df['★基金全称'].tolist()
for name in excel_name_list:
if name in fund_name_list:
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3'
)
sql = 'SELECT * FROM fund_info'
sql_df = pd.read_sql(sql, con)
name_dataframe = sql_df[sql_df['fund_full_name'] == name]
fund_id = name_dataframe.loc[name_dataframe.last_valid_index(),
'fund_id']
commit_data = excel_df[excel_df['★基金全称'] == name]
commit_data.columns = ['group', 'fund_type_strategy',
'reg_code', 'foundation_date', 'fund_name',
'fund_full_name', 'fund_manager', 'fund_manager_nominal',
'fund_stockbroker', 'fund_custodian', 'fund_member',
'fund_type_issuance', 'fund_type_structure',
'fund_structure', 'issue_scale', 'asset_scale',
'is_main_fund', 'fee_pay', 'open_date', 'locked_time_limit',
'duration', 'fee_manage', 'fee_pay_remark', 'fee_redeem',
'fee_subscription', 'fee_trust', 'investment_range',
'min_purchase_amount', 'min_append_amount', 'stop_line',
'alert_line', 'manager_participation_scale',
'investment_idea', 'structure_hierarchy', 'remark']
commit_data['fund_id'] = str(fund_id)
group = str(commit_data.loc[commit_data.fund_full_name == name,
'group'].values[0])
fund_type_strategy = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_type_strategy'].values[0])
reg_code = str(commit_data.loc[commit_data.fund_full_name ==
name, 'reg_code'].values[0])
foundation_date = str(commit_data.loc[commit_data.
fund_full_name == name, 'foundation_date'].values[0])
fund_name = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_name'].values[0])
fund_full_name = str(name)
fund_manager = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_manager'].values[0])
fund_manager_nominal = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_manager_nominal'].values[0])
fund_stockbroker = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_stockbroker'].values[0])
fund_custodian = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_custodian'].values[0])
fund_member = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_member'].values[0])
fund_type_issuance = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_type_issuance'].values[0])
fund_type_structure = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_type_structure'].values[0])
fund_structure = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_structure'].values[0])
issue_scale = str(commit_data.loc[commit_data.fund_full_name ==
name, 'issue_scale'].values[0])
asset_scale = str(commit_data.loc[commit_data.fund_full_name ==
name, 'asset_scale'].values[0])
is_main_fund = str(commit_data.loc[commit_data.fund_full_name ==
name, 'is_main_fund'].values[0])
fee_pay = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_pay'].values[0])
            open_date = str(commit_data.loc[commit_data.fund_full_name ==
                name, 'open_date'].values[0])
locked_time_limit = str(commit_data.loc[commit_data.
fund_full_name == name, 'locked_time_limit'].values[0])
duration = str(commit_data.loc[commit_data.fund_full_name ==
name, 'duration'].values[0])
fee_manage = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_manage'].values[0])
fee_pay_remark = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_pay_remark'].values[0])
fee_redeem = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_redeem'].values[0])
fee_subscription = str(commit_data.loc[commit_data.
fund_full_name == name, 'fee_subscription'].values[0])
fee_trust = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_trust'].values[0])
investment_range = str(commit_data.loc[commit_data.
fund_full_name == name, 'investment_range'].values[0])
min_purchase_amount = str(commit_data.loc[commit_data.
fund_full_name == name, 'min_purchase_amount'].values[0])
min_append_amount = str(commit_data.loc[commit_data.
fund_full_name == name, 'min_append_amount'].values[0])
stop_line = str(commit_data.loc[commit_data.fund_full_name ==
name, 'stop_line'].values[0])
alert_line = str(commit_data.loc[commit_data.fund_full_name ==
name, 'alert_line'].values[0])
manager_participation_scale = str(commit_data.loc[commit_data.
fund_full_name == name, 'manager_participation_scale'].
values[0])
investment_idea = str(commit_data.loc[commit_data.
fund_full_name == name, 'investment_idea'].values[0])
structure_hierarchy = str(commit_data.loc[commit_data.
fund_full_name == name, 'structure_hierarchy'].values[0])
remark = str(commit_data.loc[commit_data.fund_full_name == name,
'remark'].values[0])
with con:
cur = con.cursor()
sql = (
"UPDATE fund_info SET 'group'=?, fund_type_strategy=?, reg_code=?, foundation_date=?, fund_name=?, fund_full_name=?, fund_manager=?, fund_manager_nominal=?, fund_stockbroker=?, fund_custodian=?, fund_member=?, fund_type_issuance=?, fund_type_structure=?, fund_structure=?, issue_scale=?, asset_scale=?, is_main_fund=?, fee_pay=?, open_date=?, locked_time_limit=?, duration=?, fee_manage=?, fee_pay_remark=?, fee_redeem=?, fee_subscription=?, fee_trust=?, investment_range=?, min_purchase_amount=?, min_append_amount=?, stop_line=?, alert_line=?, manager_participation_scale=?, investment_idea=?, structure_hierarchy=?, remark=? WHERE fund_id=?"
)
l = (group, fund_type_strategy, reg_code, foundation_date,
fund_name, fund_full_name, fund_manager,
fund_manager_nominal, fund_stockbroker, fund_custodian,
fund_member, fund_type_issuance, fund_type_structure,
fund_structure, issue_scale, asset_scale, is_main_fund,
fee_pay, open_date, locked_time_limit, duration,
fee_manage, fee_pay_remark, fee_redeem,
fee_subscription, fee_trust, investment_range,
min_purchase_amount, min_append_amount, stop_line,
alert_line, manager_participation_scale,
investment_idea, structure_hierarchy, remark, fund_id)
cur.execute(sql, l)
print('if')
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df['★基金全称'] == name]
commit_data.columns = ['group', 'fund_type_strategy',
'reg_code', 'foundation_date', 'fund_name',
'fund_full_name', 'fund_manager', 'fund_manager_nominal',
'fund_stockbroker', 'fund_custodian', 'fund_member',
'fund_type_issuance', 'fund_type_structure',
'fund_structure', 'issue_scale', 'asset_scale',
'is_main_fund', 'fee_pay', 'open_date', 'locked_time_limit',
'duration', 'fee_manage', 'fee_pay_remark', 'fee_redeem',
'fee_subscription', 'fee_trust', 'investment_range',
'min_purchase_amount', 'min_append_amount', 'stop_line',
'alert_line', 'manager_participation_scale',
'investment_idea', 'structure_hierarchy', 'remark']
commit_data.loc[:, 'fund_id'] = 'F' + '0' * (6 - len(str(
sql_number))) + str(sql_number)
commit_data.to_sql('fund_info', con, if_exists='append', index=
False)
print('else')
def df_to_sql_T_3(filefullpath, sheet, row_name):
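    # Manager sheet loader: keyed on the manager name (★姓名) and written to
    # manager_info with zero-padded 'M' ids, updating existing rows and appending
    # new ones.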
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how='all')
excel_df = excel_df.dropna(axis=1, how='all')
excel_df = excel_df.T
excel_df.columns = excel_df.loc[row_name]
excel_df = excel_df.drop(row_name, axis=0, inplace=False)
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★姓名'], inplace=True)
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3')
sql = 'SELECT * FROM manager_info'
sql_df = pd.read_sql(sql, con)
user_list = sql_df['user_name'].tolist()
sql_number = len(user_list)
user_id_number = 0
for user_name in sql_df['user_name'].unique():
user_id_number = user_id_number + 1
user_id = 'M' + '0' * (5 - len(str(user_id_number))) + str(
user_id_number)
with con:
cur = con.cursor()
cur.execute('UPDATE manager_info SET user_id=? WHERE user_name=?',
(user_id, user_name))
excel_name_list = excel_df['★姓名'].tolist()
for name in excel_name_list:
if name in user_list:
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3'
)
sql = 'SELECT * FROM manager_info'
sql_df = pd.read_sql(sql, con)
name_dataframe = sql_df[sql_df['user_name'] == name]
user_id = name_dataframe.loc[name_dataframe.last_valid_index(),
'user_id']
commit_data = excel_df[excel_df['★姓名'] == name]
commit_data.columns = ['user_name', 'sex', 'org_name',
'introduction', 'photo', 'entry_date', 'investment_years',
'education', 'duty', 'qualification', 'background',
'is_fund_qualification', 'is_core_member', 'resume',
'max_asset_mgt_scale', 'prize', 'remark']
commit_data['user_id'] = str(user_id)
user_name = str(name)
sex = str(commit_data.loc[commit_data.user_name == name, 'sex']
.values[0])
org_name = str(commit_data.loc[commit_data.user_name == name,
'org_name'].values[0])
introduction = str(commit_data.loc[commit_data.user_name ==
name, 'introduction'].values[0])
photo = str(commit_data.loc[commit_data.user_name == name,
'photo'].values[0])
entry_date = str(commit_data.loc[commit_data.user_name == name,
'entry_date'].values[0])
investment_years = str(commit_data.loc[commit_data.user_name ==
name, 'investment_years'].values[0])
education = str(commit_data.loc[commit_data.user_name == name,
'education'].values[0])
duty = str(commit_data.loc[commit_data.user_name == name,
'duty'].values[0])
qualification = str(commit_data.loc[commit_data.user_name ==
name, 'qualification'].values[0])
background = str(commit_data.loc[commit_data.user_name == name,
'background'].values[0])
is_fund_qualification = str(commit_data.loc[commit_data.
user_name == name, 'is_fund_qualification'].values[0])
is_core_member = str(commit_data.loc[commit_data.user_name ==
name, 'is_core_member'].values[0])
resume = str(commit_data.loc[commit_data.user_name == name,
'resume'].values[0])
max_asset_mgt_scale = str(commit_data.loc[commit_data.user_name ==
name, 'max_asset_mgt_scale'].values[0])
prize = str(commit_data.loc[commit_data.user_name == name,
'prize'].values[0])
remark = str(commit_data.loc[commit_data.user_name == name,
'remark'].values[0])
with con:
cur = con.cursor()
sql = (
'UPDATE manager_info SET user_name=?, sex=?, org_name=?, introduction=?, photo=?, entry_date=?, investment_years=?, education=?, duty=?, qualification=?, background=?, is_fund_qualification=?, is_core_member=?, resume=?, max_asset_mgt_scale=?, prize=?, remark=? WHERE user_id=?'
)
l = (user_name, sex, org_name, introduction, photo,
entry_date, investment_years, education, duty,
qualification, background, is_fund_qualification,
is_core_member, resume, max_asset_mgt_scale, prize,
remark, user_id)
cur.execute(sql, l)
print('if')
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df['★姓名'] == name]
commit_data.columns = ['user_name', 'sex', 'org_name',
'introduction', 'photo', 'entry_date', 'investment_years',
'education', 'duty', 'qualification', 'background',
'is_fund_qualification', 'is_core_member', 'resume',
'max_asset_mgt_scale', 'prize', 'remark']
commit_data.loc[:, 'user_id'] = 'M' + '0' * (5 - len(str(
sql_number))) + str(sql_number)
commit_data.to_sql('manager_info', con, if_exists='append',
index=False)
print('else')
<|reserved_special_token_0|>
def listing(request):
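    # Upload view: on a valid, authenticated POST the uploaded workbook is saved
    # via UserProfile and sheets 1-4 are dispatched to the matching df_to_sql_*
    # loader; unauthenticated or invalid POSTs redirect to login, and GET simply
    # renders the empty upload form.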
context = {}
if request.method == 'POST':
uf = UserForm(request.POST, request.FILES)
if request.user.username and uf.is_valid():
user_upload_file = uf.cleaned_data['user_upload_file']
profile = UserProfile()
profile.username = request.user.username
profile.user_upload_file = user_upload_file
profile.save()
file_name = request.FILES.get('user_upload_file').name
path = (
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\upload\\upload\\'
)
filefullpath = path + file_name
if user_upload_file:
b = xlrd.open_workbook(filefullpath)
for sheet in range(1, 5):
if sheet == 1:
row_name = '公司资料简介'
df_to_sql_T_1(filefullpath, sheet, row_name)
if sheet == 2:
row_name = '基金简介'
df_to_sql_T_2(filefullpath, sheet, row_name)
if sheet == 3:
row_name = '人员简介'
df_to_sql_T_3(filefullpath, sheet, row_name)
if sheet == 4:
row_name = '基金简称'
df_to_sql_4(filefullpath, sheet, row_name)
return HttpResponse('upload ok!')
else:
return redirect(to='login')
else:
uf = UserForm()
context['uf'] = uf
return render(request, 'website/templates/listing.html', context)
<|reserved_special_token_0|>
def index_register(request):
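    # Minimal registration view built on Django's UserCreationForm: a valid POST
    # creates the user and redirects to the login page.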
context = {}
if request.method == 'GET':
form = UserCreationForm
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect(to='login')
context['form'] = form
return render(request, 'register_login.html', context)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def df_to_sql_T_1(filefullpath, sheet, row_name):
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how='all')
excel_df = excel_df.dropna(axis=1, how='all')
excel_df = excel_df.T
excel_df.columns = excel_df.loc[row_name]
excel_df = excel_df.drop(row_name, axis=0, inplace=False)
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★机构全名'], inplace=True)
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3')
sql = 'SELECT * FROM org_info'
sql_df = pd.read_sql(sql, con)
fund_name_list = sql_df['org_full_name'].tolist()
sql_number = len(fund_name_list)
org_id_number = 0
for org_full_name in sql_df['org_full_name'].unique():
org_id_number = org_id_number + 1
org_id = 'O' + '0' * (5 - len(str(org_id_number))) + str(org_id_number)
with con:
cur = con.cursor()
cur.execute('UPDATE org_info SET org_id=? WHERE org_full_name=?',
(org_id, org_full_name))
excel_name_list = excel_df['★机构全名'].tolist()
for name in excel_name_list:
if name in fund_name_list:
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3'
)
sql = 'SELECT * FROM org_info'
sql_df = pd.read_sql(sql, con)
name_dataframe = sql_df[sql_df['org_full_name'] == name]
org_id = name_dataframe.loc[name_dataframe.last_valid_index(),
'org_id']
commit_data = excel_df[excel_df['★机构全名'] == name]
commit_data.columns = ['org_name', 'org_full_name', 'reg_code',
'reg_time', 'found_date', 'reg_capital', 'real_capital',
'region', 'profile', 'address', 'team', 'fund_num',
'is_qualification', 'prize', 'team_scale',
'investment_idea', 'master_strategy', 'remark',
'asset_mgt_scale', 'linkman', 'linkman_duty',
'linkman_phone', 'linkman_email']
commit_data['org_id'] = str(org_id)
org_name = str(commit_data.loc[commit_data.org_full_name ==
name, 'org_name'].values[0])
org_full_name = str(name)
reg_code = str(commit_data.loc[commit_data.org_full_name ==
name, 'reg_code'].values[0])
reg_time = str(commit_data.loc[commit_data.org_full_name ==
name, 'reg_time'].values[0])
found_date = str(commit_data.loc[commit_data.org_full_name ==
name, 'found_date'].values[0])
reg_capital = str(commit_data.loc[commit_data.org_full_name ==
name, 'reg_capital'].values[0])
real_capital = str(commit_data.loc[commit_data.org_full_name ==
name, 'real_capital'].values[0])
region = str(commit_data.loc[commit_data.org_full_name == name,
'region'].values[0])
profile = str(commit_data.loc[commit_data.org_full_name == name,
'profile'].values[0])
address = str(commit_data.loc[commit_data.org_full_name == name,
'address'].values[0])
            team = str(commit_data.loc[commit_data.org_full_name == name,
                'team'].values[0])
            fund_num = str(commit_data.loc[commit_data.org_full_name ==
                name, 'fund_num'].values[0])
is_qualification = str(commit_data.loc[commit_data.
org_full_name == name, 'is_qualification'].values[0])
prize = str(commit_data.loc[commit_data.org_full_name == name,
'prize'].values[0])
            team_scale = str(commit_data.loc[commit_data.org_full_name ==
                name, 'team_scale'].values[0])
investment_idea = str(commit_data.loc[commit_data.org_full_name ==
name, 'investment_idea'].values[0])
master_strategy = str(commit_data.loc[commit_data.org_full_name ==
name, 'master_strategy'].values[0])
remark = str(commit_data.loc[commit_data.org_full_name == name,
'remark'].values[0])
asset_mgt_scale = str(commit_data.loc[commit_data.org_full_name ==
name, 'asset_mgt_scale'].values[0])
linkman = str(commit_data.loc[commit_data.org_full_name == name,
'linkman'].values[0])
linkman_duty = str(commit_data.loc[commit_data.org_full_name ==
name, 'linkman_duty'].values[0])
linkman_phone = str(commit_data.loc[commit_data.org_full_name ==
name, 'linkman_phone'].values[0])
linkman_email = str(commit_data.loc[commit_data.org_full_name ==
name, 'linkman_email'].values[0])
with con:
cur = con.cursor()
sql = (
'UPDATE org_info SET org_name=?, org_full_name=?, reg_code=?, reg_time=?, found_date=?, reg_capital=?, real_capital=?, region=?,profile=?, address=?, team=?, fund_num=?, is_qualification=?, prize=?, team_scale=?, investment_idea=?, master_strategy=?, remark=?, asset_mgt_scale=?, linkman=?, linkman_duty=?, linkman_phone=?, linkman_email=? WHERE org_id=?'
)
l = (org_name, org_full_name, reg_code, reg_time,
found_date, reg_capital, real_capital, region, profile,
address, team, fund_num, is_qualification, prize,
team_scale, investment_idea, master_strategy, remark,
asset_mgt_scale, linkman, linkman_duty, linkman_phone,
linkman_email, org_id)
cur.execute(sql, l)
print('if')
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df['★机构全名'] == name]
commit_data.columns = ['org_name', 'org_full_name', 'reg_code',
'reg_time', 'found_date', 'reg_capital', 'real_capital',
'region', 'profile', 'address', 'team', 'fund_num',
'is_qualification', 'prize', 'team_scale',
'investment_idea', 'master_strategy', 'remark',
'asset_mgt_scale', 'linkman', 'linkman_duty',
'linkman_phone', 'linkman_email']
commit_data.loc[:, 'org_id'] = 'O' + '0' * (5 - len(str(
sql_number))) + str(sql_number)
commit_data.to_sql('org_info', con, if_exists='append', index=False
)
print('else')
def df_to_sql_T_2(filefullpath, sheet, row_name):
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how='all')
excel_df = excel_df.dropna(axis=1, how='all')
excel_df = excel_df.T
excel_df.columns = excel_df.loc[row_name]
excel_df = excel_df.drop(row_name, axis=0, inplace=False)
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★基金全称'], inplace=True)
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3')
sql = 'SELECT * FROM fund_info'
sql_df = pd.read_sql(sql, con)
fund_name_list = sql_df['fund_full_name'].tolist()
sql_number = len(fund_name_list)
fund_id_number = 0
for fund_full_name in sql_df['fund_full_name'].unique():
fund_id_number = fund_id_number + 1
fund_id = 'F' + '0' * (6 - len(str(fund_id_number))) + str(
fund_id_number)
with con:
cur = con.cursor()
cur.execute('UPDATE fund_info SET fund_id=? WHERE fund_full_name=?'
, (fund_id, fund_full_name))
excel_name_list = excel_df['★基金全称'].tolist()
for name in excel_name_list:
if name in fund_name_list:
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3'
)
sql = 'SELECT * FROM fund_info'
sql_df = pd.read_sql(sql, con)
name_dataframe = sql_df[sql_df['fund_full_name'] == name]
fund_id = name_dataframe.loc[name_dataframe.last_valid_index(),
'fund_id']
commit_data = excel_df[excel_df['★基金全称'] == name]
commit_data.columns = ['group', 'fund_type_strategy',
'reg_code', 'foundation_date', 'fund_name',
'fund_full_name', 'fund_manager', 'fund_manager_nominal',
'fund_stockbroker', 'fund_custodian', 'fund_member',
'fund_type_issuance', 'fund_type_structure',
'fund_structure', 'issue_scale', 'asset_scale',
'is_main_fund', 'fee_pay', 'open_date', 'locked_time_limit',
'duration', 'fee_manage', 'fee_pay_remark', 'fee_redeem',
'fee_subscription', 'fee_trust', 'investment_range',
'min_purchase_amount', 'min_append_amount', 'stop_line',
'alert_line', 'manager_participation_scale',
'investment_idea', 'structure_hierarchy', 'remark']
commit_data['fund_id'] = str(fund_id)
group = str(commit_data.loc[commit_data.fund_full_name == name,
'group'].values[0])
fund_type_strategy = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_type_strategy'].values[0])
reg_code = str(commit_data.loc[commit_data.fund_full_name ==
name, 'reg_code'].values[0])
foundation_date = str(commit_data.loc[commit_data.
fund_full_name == name, 'foundation_date'].values[0])
fund_name = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_name'].values[0])
fund_full_name = str(name)
fund_manager = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_manager'].values[0])
fund_manager_nominal = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_manager_nominal'].values[0])
fund_stockbroker = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_stockbroker'].values[0])
fund_custodian = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_custodian'].values[0])
fund_member = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_member'].values[0])
fund_type_issuance = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_type_issuance'].values[0])
fund_type_structure = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_type_structure'].values[0])
fund_structure = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_structure'].values[0])
issue_scale = str(commit_data.loc[commit_data.fund_full_name ==
name, 'issue_scale'].values[0])
asset_scale = str(commit_data.loc[commit_data.fund_full_name ==
name, 'asset_scale'].values[0])
is_main_fund = str(commit_data.loc[commit_data.fund_full_name ==
name, 'is_main_fund'].values[0])
fee_pay = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_pay'].values[0])
            open_date = str(commit_data.loc[commit_data.fund_full_name ==
                name, 'open_date'].values[0])
locked_time_limit = str(commit_data.loc[commit_data.
fund_full_name == name, 'locked_time_limit'].values[0])
duration = str(commit_data.loc[commit_data.fund_full_name ==
name, 'duration'].values[0])
fee_manage = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_manage'].values[0])
fee_pay_remark = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_pay_remark'].values[0])
fee_redeem = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_redeem'].values[0])
fee_subscription = str(commit_data.loc[commit_data.
fund_full_name == name, 'fee_subscription'].values[0])
fee_trust = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_trust'].values[0])
investment_range = str(commit_data.loc[commit_data.
fund_full_name == name, 'investment_range'].values[0])
min_purchase_amount = str(commit_data.loc[commit_data.
fund_full_name == name, 'min_purchase_amount'].values[0])
min_append_amount = str(commit_data.loc[commit_data.
fund_full_name == name, 'min_append_amount'].values[0])
stop_line = str(commit_data.loc[commit_data.fund_full_name ==
name, 'stop_line'].values[0])
alert_line = str(commit_data.loc[commit_data.fund_full_name ==
name, 'alert_line'].values[0])
manager_participation_scale = str(commit_data.loc[commit_data.
fund_full_name == name, 'manager_participation_scale'].
values[0])
investment_idea = str(commit_data.loc[commit_data.
fund_full_name == name, 'investment_idea'].values[0])
structure_hierarchy = str(commit_data.loc[commit_data.
fund_full_name == name, 'structure_hierarchy'].values[0])
remark = str(commit_data.loc[commit_data.fund_full_name == name,
'remark'].values[0])
with con:
cur = con.cursor()
sql = (
"UPDATE fund_info SET 'group'=?, fund_type_strategy=?, reg_code=?, foundation_date=?, fund_name=?, fund_full_name=?, fund_manager=?, fund_manager_nominal=?, fund_stockbroker=?, fund_custodian=?, fund_member=?, fund_type_issuance=?, fund_type_structure=?, fund_structure=?, issue_scale=?, asset_scale=?, is_main_fund=?, fee_pay=?, open_date=?, locked_time_limit=?, duration=?, fee_manage=?, fee_pay_remark=?, fee_redeem=?, fee_subscription=?, fee_trust=?, investment_range=?, min_purchase_amount=?, min_append_amount=?, stop_line=?, alert_line=?, manager_participation_scale=?, investment_idea=?, structure_hierarchy=?, remark=? WHERE fund_id=?"
)
l = (group, fund_type_strategy, reg_code, foundation_date,
fund_name, fund_full_name, fund_manager,
fund_manager_nominal, fund_stockbroker, fund_custodian,
fund_member, fund_type_issuance, fund_type_structure,
fund_structure, issue_scale, asset_scale, is_main_fund,
fee_pay, open_date, locked_time_limit, duration,
fee_manage, fee_pay_remark, fee_redeem,
fee_subscription, fee_trust, investment_range,
min_purchase_amount, min_append_amount, stop_line,
alert_line, manager_participation_scale,
investment_idea, structure_hierarchy, remark, fund_id)
cur.execute(sql, l)
print('if')
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df['★基金全称'] == name]
commit_data.columns = ['group', 'fund_type_strategy',
'reg_code', 'foundation_date', 'fund_name',
'fund_full_name', 'fund_manager', 'fund_manager_nominal',
'fund_stockbroker', 'fund_custodian', 'fund_member',
'fund_type_issuance', 'fund_type_structure',
'fund_structure', 'issue_scale', 'asset_scale',
'is_main_fund', 'fee_pay', 'open_date', 'locked_time_limit',
'duration', 'fee_manage', 'fee_pay_remark', 'fee_redeem',
'fee_subscription', 'fee_trust', 'investment_range',
'min_purchase_amount', 'min_append_amount', 'stop_line',
'alert_line', 'manager_participation_scale',
'investment_idea', 'structure_hierarchy', 'remark']
commit_data.loc[:, 'fund_id'] = 'F' + '0' * (6 - len(str(
sql_number))) + str(sql_number)
commit_data.to_sql('fund_info', con, if_exists='append', index=
False)
print('else')
def df_to_sql_T_3(filefullpath, sheet, row_name):
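    """Load the transposed manager sheet (one manager per column), assign
    M-prefixed user_ids, and either update existing manager_info rows matched
    by user_name or append new ones to the SQLite database."""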
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how='all')
excel_df = excel_df.dropna(axis=1, how='all')
excel_df = excel_df.T
excel_df.columns = excel_df.loc[row_name]
excel_df = excel_df.drop(row_name, axis=0, inplace=False)
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★姓名'], inplace=True)
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3')
sql = 'SELECT * FROM manager_info'
sql_df = pd.read_sql(sql, con)
user_list = sql_df['user_name'].tolist()
sql_number = len(user_list)
user_id_number = 0
for user_name in sql_df['user_name'].unique():
user_id_number = user_id_number + 1
user_id = 'M' + '0' * (5 - len(str(user_id_number))) + str(
user_id_number)
with con:
cur = con.cursor()
cur.execute('UPDATE manager_info SET user_id=? WHERE user_name=?',
(user_id, user_name))
excel_name_list = excel_df['★姓名'].tolist()
for name in excel_name_list:
if name in user_list:
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3'
)
sql = 'SELECT * FROM manager_info'
sql_df = pd.read_sql(sql, con)
name_dataframe = sql_df[sql_df['user_name'] == name]
user_id = name_dataframe.loc[name_dataframe.last_valid_index(),
'user_id']
commit_data = excel_df[excel_df['★姓名'] == name]
commit_data.columns = ['user_name', 'sex', 'org_name',
'introduction', 'photo', 'entry_date', 'investment_years',
'education', 'duty', 'qualification', 'background',
'is_fund_qualification', 'is_core_member', 'resume',
'max_asset_mgt_scale', 'prize', 'remark']
commit_data['user_id'] = str(user_id)
user_name = str(name)
sex = str(commit_data.loc[commit_data.user_name == name, 'sex']
.values[0])
org_name = str(commit_data.loc[commit_data.user_name == name,
'org_name'].values[0])
introduction = str(commit_data.loc[commit_data.user_name ==
name, 'introduction'].values[0])
photo = str(commit_data.loc[commit_data.user_name == name,
'photo'].values[0])
entry_date = str(commit_data.loc[commit_data.user_name == name,
'entry_date'].values[0])
investment_years = str(commit_data.loc[commit_data.user_name ==
name, 'investment_years'].values[0])
education = str(commit_data.loc[commit_data.user_name == name,
'education'].values[0])
duty = str(commit_data.loc[commit_data.user_name == name,
'duty'].values[0])
qualification = str(commit_data.loc[commit_data.user_name ==
name, 'qualification'].values[0])
background = str(commit_data.loc[commit_data.user_name == name,
'background'].values[0])
is_fund_qualification = str(commit_data.loc[commit_data.
user_name == name, 'is_fund_qualification'].values[0])
is_core_member = str(commit_data.loc[commit_data.user_name ==
name, 'is_core_member'].values[0])
resume = str(commit_data.loc[commit_data.user_name == name,
'resume'].values[0])
max_asset_mgt_scale = str(commit_data.loc[commit_data.user_name ==
name, 'max_asset_mgt_scale'].values[0])
prize = str(commit_data.loc[commit_data.user_name == name,
'prize'].values[0])
remark = str(commit_data.loc[commit_data.user_name == name,
'remark'].values[0])
with con:
cur = con.cursor()
sql = (
'UPDATE manager_info SET user_name=?, sex=?, org_name=?, introduction=?, photo=?, entry_date=?, investment_years=?, education=?, duty=?, qualification=?, background=?, is_fund_qualification=?, is_core_member=?, resume=?, max_asset_mgt_scale=?, prize=?, remark=? WHERE user_id=?'
)
l = (user_name, sex, org_name, introduction, photo,
entry_date, investment_years, education, duty,
qualification, background, is_fund_qualification,
is_core_member, resume, max_asset_mgt_scale, prize,
remark, user_id)
cur.execute(sql, l)
print('if')
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df['★姓名'] == name]
commit_data.columns = ['user_name', 'sex', 'org_name',
'introduction', 'photo', 'entry_date', 'investment_years',
'education', 'duty', 'qualification', 'background',
'is_fund_qualification', 'is_core_member', 'resume',
'max_asset_mgt_scale', 'prize', 'remark']
commit_data.loc[:, 'user_id'] = 'M' + '0' * (5 - len(str(
sql_number))) + str(sql_number)
commit_data.to_sql('manager_info', con, if_exists='append',
index=False)
print('else')
def listing(request):
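    """Handle the Excel upload form: for a logged-in user with a valid form,
    save the uploaded file, then parse sheets 1-4 into org_info, fund_info,
    manager_info and fund_nav_data via the df_to_sql_* helpers."""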
context = {}
if request.method == 'POST':
uf = UserForm(request.POST, request.FILES)
if request.user.username and uf.is_valid():
user_upload_file = uf.cleaned_data['user_upload_file']
profile = UserProfile()
profile.username = request.user.username
profile.user_upload_file = user_upload_file
profile.save()
file_name = request.FILES.get('user_upload_file').name
path = (
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\upload\\upload\\'
)
filefullpath = path + file_name
if user_upload_file:
b = xlrd.open_workbook(filefullpath)
for sheet in range(1, 5):
if sheet == 1:
row_name = '公司资料简介'
df_to_sql_T_1(filefullpath, sheet, row_name)
if sheet == 2:
row_name = '基金简介'
df_to_sql_T_2(filefullpath, sheet, row_name)
if sheet == 3:
row_name = '人员简介'
df_to_sql_T_3(filefullpath, sheet, row_name)
if sheet == 4:
row_name = '基金简称'
df_to_sql_4(filefullpath, sheet, row_name)
return HttpResponse('upload ok!')
else:
return redirect(to='login')
else:
uf = UserForm()
context['uf'] = uf
return render(request, 'website/templates/listing.html', context)
def index_login(request):
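    """Render the login form on GET and authenticate on POST, redirecting to
    the listing view on success."""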
context = {}
if request.method == 'GET':
form = AuthenticationForm
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
login(request, form.get_user())
return redirect(to='list')
context['form'] = form
return render(request, 'register_login.html', context)
def index_register(request):
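    """Render the registration form on GET and create the account on POST,
    redirecting to the login view on success."""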
context = {}
if request.method == 'GET':
form = UserCreationForm
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect(to='login')
context['form'] = form
return render(request, 'register_login.html', context)
from django.shortcuts import render, Http404, HttpResponse, redirect
from django.contrib.auth import authenticate, login
from website.form import UserForm
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from website.models import UserProfile
from website.form import UserForm
import pandas as pd
from pandas import DataFrame
from sqlalchemy import create_engine
from django.contrib.auth.decorators import login_required
import sqlite3
import xlrd
import uuid
def df_to_sql_T_1(filefullpath, sheet, row_name):
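    """Load the transposed organisation sheet (one org per column), assign
    O-prefixed org_ids, and either update existing org_info rows matched by
    org_full_name or append new ones to the SQLite database."""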
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how='all')
excel_df = excel_df.dropna(axis=1, how='all')
excel_df = excel_df.T
excel_df.columns = excel_df.loc[row_name]
excel_df = excel_df.drop(row_name, axis=0, inplace=False)
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★机构全名'], inplace=True)
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3')
sql = 'SELECT * FROM org_info'
sql_df = pd.read_sql(sql, con)
fund_name_list = sql_df['org_full_name'].tolist()
sql_number = len(fund_name_list)
org_id_number = 0
for org_full_name in sql_df['org_full_name'].unique():
org_id_number = org_id_number + 1
org_id = 'O' + '0' * (5 - len(str(org_id_number))) + str(org_id_number)
with con:
cur = con.cursor()
cur.execute('UPDATE org_info SET org_id=? WHERE org_full_name=?',
(org_id, org_full_name))
excel_name_list = excel_df['★机构全名'].tolist()
for name in excel_name_list:
if name in fund_name_list:
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3'
)
sql = 'SELECT * FROM org_info'
sql_df = pd.read_sql(sql, con)
name_dataframe = sql_df[sql_df['org_full_name'] == name]
org_id = name_dataframe.loc[name_dataframe.last_valid_index(),
'org_id']
commit_data = excel_df[excel_df['★机构全名'] == name]
commit_data.columns = ['org_name', 'org_full_name', 'reg_code',
'reg_time', 'found_date', 'reg_capital', 'real_capital',
'region', 'profile', 'address', 'team', 'fund_num',
'is_qualification', 'prize', 'team_scale',
'investment_idea', 'master_strategy', 'remark',
'asset_mgt_scale', 'linkman', 'linkman_duty',
'linkman_phone', 'linkman_email']
commit_data['org_id'] = str(org_id)
org_name = str(commit_data.loc[commit_data.org_full_name ==
name, 'org_name'].values[0])
org_full_name = str(name)
reg_code = str(commit_data.loc[commit_data.org_full_name ==
name, 'reg_code'].values[0])
reg_time = str(commit_data.loc[commit_data.org_full_name ==
name, 'reg_time'].values[0])
found_date = str(commit_data.loc[commit_data.org_full_name ==
name, 'found_date'].values[0])
reg_capital = str(commit_data.loc[commit_data.org_full_name ==
name, 'reg_capital'].values[0])
real_capital = str(commit_data.loc[commit_data.org_full_name ==
name, 'real_capital'].values[0])
region = str(commit_data.loc[commit_data.org_full_name == name,
'region'].values[0])
profile = str(commit_data.loc[commit_data.org_full_name == name,
'profile'].values[0])
address = str(commit_data.loc[commit_data.org_full_name == name,
'address'].values[0])
            team = str(commit_data.loc[commit_data.org_full_name == name,
                'team'].values[0])
            fund_num = str(commit_data.loc[commit_data.org_full_name ==
                name, 'fund_num'].values[0])
is_qualification = str(commit_data.loc[commit_data.
org_full_name == name, 'is_qualification'].values[0])
prize = str(commit_data.loc[commit_data.org_full_name == name,
'prize'].values[0])
            team_scale = str(commit_data.loc[commit_data.org_full_name ==
                name, 'team_scale'].values[0])
investment_idea = str(commit_data.loc[commit_data.org_full_name ==
name, 'investment_idea'].values[0])
master_strategy = str(commit_data.loc[commit_data.org_full_name ==
name, 'master_strategy'].values[0])
remark = str(commit_data.loc[commit_data.org_full_name == name,
'remark'].values[0])
asset_mgt_scale = str(commit_data.loc[commit_data.org_full_name ==
name, 'asset_mgt_scale'].values[0])
linkman = str(commit_data.loc[commit_data.org_full_name == name,
'linkman'].values[0])
linkman_duty = str(commit_data.loc[commit_data.org_full_name ==
name, 'linkman_duty'].values[0])
linkman_phone = str(commit_data.loc[commit_data.org_full_name ==
name, 'linkman_phone'].values[0])
linkman_email = str(commit_data.loc[commit_data.org_full_name ==
name, 'linkman_email'].values[0])
with con:
cur = con.cursor()
sql = (
'UPDATE org_info SET org_name=?, org_full_name=?, reg_code=?, reg_time=?, found_date=?, reg_capital=?, real_capital=?, region=?,profile=?, address=?, team=?, fund_num=?, is_qualification=?, prize=?, team_scale=?, investment_idea=?, master_strategy=?, remark=?, asset_mgt_scale=?, linkman=?, linkman_duty=?, linkman_phone=?, linkman_email=? WHERE org_id=?'
)
l = (org_name, org_full_name, reg_code, reg_time,
found_date, reg_capital, real_capital, region, profile,
address, team, fund_num, is_qualification, prize,
team_scale, investment_idea, master_strategy, remark,
asset_mgt_scale, linkman, linkman_duty, linkman_phone,
linkman_email, org_id)
cur.execute(sql, l)
print('if')
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df['★机构全名'] == name]
commit_data.columns = ['org_name', 'org_full_name', 'reg_code',
'reg_time', 'found_date', 'reg_capital', 'real_capital',
'region', 'profile', 'address', 'team', 'fund_num',
'is_qualification', 'prize', 'team_scale',
'investment_idea', 'master_strategy', 'remark',
'asset_mgt_scale', 'linkman', 'linkman_duty',
'linkman_phone', 'linkman_email']
commit_data.loc[:, 'org_id'] = 'O' + '0' * (5 - len(str(
sql_number))) + str(sql_number)
commit_data.to_sql('org_info', con, if_exists='append', index=False
)
print('else')
def df_to_sql_T_2(filefullpath, sheet, row_name):
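    """Load the transposed fund sheet (one fund per column), assign F-prefixed
    fund_ids, and either update existing fund_info rows matched by
    fund_full_name or append new ones to the SQLite database."""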
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how='all')
excel_df = excel_df.dropna(axis=1, how='all')
excel_df = excel_df.T
excel_df.columns = excel_df.loc[row_name]
excel_df = excel_df.drop(row_name, axis=0, inplace=False)
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★基金全称'], inplace=True)
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3')
sql = 'SELECT * FROM fund_info'
sql_df = pd.read_sql(sql, con)
fund_name_list = sql_df['fund_full_name'].tolist()
sql_number = len(fund_name_list)
fund_id_number = 0
for fund_full_name in sql_df['fund_full_name'].unique():
fund_id_number = fund_id_number + 1
fund_id = 'F' + '0' * (6 - len(str(fund_id_number))) + str(
fund_id_number)
with con:
cur = con.cursor()
cur.execute('UPDATE fund_info SET fund_id=? WHERE fund_full_name=?'
, (fund_id, fund_full_name))
excel_name_list = excel_df['★基金全称'].tolist()
for name in excel_name_list:
if name in fund_name_list:
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3'
)
sql = 'SELECT * FROM fund_info'
sql_df = pd.read_sql(sql, con)
name_dataframe = sql_df[sql_df['fund_full_name'] == name]
fund_id = name_dataframe.loc[name_dataframe.last_valid_index(),
'fund_id']
commit_data = excel_df[excel_df['★基金全称'] == name]
commit_data.columns = ['group', 'fund_type_strategy',
'reg_code', 'foundation_date', 'fund_name',
'fund_full_name', 'fund_manager', 'fund_manager_nominal',
'fund_stockbroker', 'fund_custodian', 'fund_member',
'fund_type_issuance', 'fund_type_structure',
'fund_structure', 'issue_scale', 'asset_scale',
'is_main_fund', 'fee_pay', 'open_date', 'locked_time_limit',
'duration', 'fee_manage', 'fee_pay_remark', 'fee_redeem',
'fee_subscription', 'fee_trust', 'investment_range',
'min_purchase_amount', 'min_append_amount', 'stop_line',
'alert_line', 'manager_participation_scale',
'investment_idea', 'structure_hierarchy', 'remark']
commit_data['fund_id'] = str(fund_id)
group = str(commit_data.loc[commit_data.fund_full_name == name,
'group'].values[0])
fund_type_strategy = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_type_strategy'].values[0])
reg_code = str(commit_data.loc[commit_data.fund_full_name ==
name, 'reg_code'].values[0])
foundation_date = str(commit_data.loc[commit_data.
fund_full_name == name, 'foundation_date'].values[0])
fund_name = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_name'].values[0])
fund_full_name = str(name)
fund_manager = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_manager'].values[0])
fund_manager_nominal = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_manager_nominal'].values[0])
fund_stockbroker = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_stockbroker'].values[0])
fund_custodian = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_custodian'].values[0])
fund_member = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_member'].values[0])
fund_type_issuance = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_type_issuance'].values[0])
fund_type_structure = str(commit_data.loc[commit_data.
fund_full_name == name, 'fund_type_structure'].values[0])
fund_structure = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fund_structure'].values[0])
issue_scale = str(commit_data.loc[commit_data.fund_full_name ==
name, 'issue_scale'].values[0])
asset_scale = str(commit_data.loc[commit_data.fund_full_name ==
name, 'asset_scale'].values[0])
is_main_fund = str(commit_data.loc[commit_data.fund_full_name ==
name, 'is_main_fund'].values[0])
fee_pay = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_pay'].values[0])
            open_date = str(commit_data.loc[commit_data.fund_full_name ==
                name, 'open_date'].values[0])
locked_time_limit = str(commit_data.loc[commit_data.
fund_full_name == name, 'locked_time_limit'].values[0])
duration = str(commit_data.loc[commit_data.fund_full_name ==
name, 'duration'].values[0])
fee_manage = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_manage'].values[0])
fee_pay_remark = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_pay_remark'].values[0])
fee_redeem = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_redeem'].values[0])
fee_subscription = str(commit_data.loc[commit_data.
fund_full_name == name, 'fee_subscription'].values[0])
fee_trust = str(commit_data.loc[commit_data.fund_full_name ==
name, 'fee_trust'].values[0])
investment_range = str(commit_data.loc[commit_data.
fund_full_name == name, 'investment_range'].values[0])
min_purchase_amount = str(commit_data.loc[commit_data.
fund_full_name == name, 'min_purchase_amount'].values[0])
min_append_amount = str(commit_data.loc[commit_data.
fund_full_name == name, 'min_append_amount'].values[0])
stop_line = str(commit_data.loc[commit_data.fund_full_name ==
name, 'stop_line'].values[0])
alert_line = str(commit_data.loc[commit_data.fund_full_name ==
name, 'alert_line'].values[0])
manager_participation_scale = str(commit_data.loc[commit_data.
fund_full_name == name, 'manager_participation_scale'].
values[0])
investment_idea = str(commit_data.loc[commit_data.
fund_full_name == name, 'investment_idea'].values[0])
structure_hierarchy = str(commit_data.loc[commit_data.
fund_full_name == name, 'structure_hierarchy'].values[0])
remark = str(commit_data.loc[commit_data.fund_full_name == name,
'remark'].values[0])
with con:
cur = con.cursor()
sql = (
"UPDATE fund_info SET 'group'=?, fund_type_strategy=?, reg_code=?, foundation_date=?, fund_name=?, fund_full_name=?, fund_manager=?, fund_manager_nominal=?, fund_stockbroker=?, fund_custodian=?, fund_member=?, fund_type_issuance=?, fund_type_structure=?, fund_structure=?, issue_scale=?, asset_scale=?, is_main_fund=?, fee_pay=?, open_date=?, locked_time_limit=?, duration=?, fee_manage=?, fee_pay_remark=?, fee_redeem=?, fee_subscription=?, fee_trust=?, investment_range=?, min_purchase_amount=?, min_append_amount=?, stop_line=?, alert_line=?, manager_participation_scale=?, investment_idea=?, structure_hierarchy=?, remark=? WHERE fund_id=?"
)
l = (group, fund_type_strategy, reg_code, foundation_date,
fund_name, fund_full_name, fund_manager,
fund_manager_nominal, fund_stockbroker, fund_custodian,
fund_member, fund_type_issuance, fund_type_structure,
fund_structure, issue_scale, asset_scale, is_main_fund,
fee_pay, open_date, locked_time_limit, duration,
fee_manage, fee_pay_remark, fee_redeem,
fee_subscription, fee_trust, investment_range,
min_purchase_amount, min_append_amount, stop_line,
alert_line, manager_participation_scale,
investment_idea, structure_hierarchy, remark, fund_id)
cur.execute(sql, l)
print('if')
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df['★基金全称'] == name]
commit_data.columns = ['group', 'fund_type_strategy',
'reg_code', 'foundation_date', 'fund_name',
'fund_full_name', 'fund_manager', 'fund_manager_nominal',
'fund_stockbroker', 'fund_custodian', 'fund_member',
'fund_type_issuance', 'fund_type_structure',
'fund_structure', 'issue_scale', 'asset_scale',
'is_main_fund', 'fee_pay', 'open_date', 'locked_time_limit',
'duration', 'fee_manage', 'fee_pay_remark', 'fee_redeem',
'fee_subscription', 'fee_trust', 'investment_range',
'min_purchase_amount', 'min_append_amount', 'stop_line',
'alert_line', 'manager_participation_scale',
'investment_idea', 'structure_hierarchy', 'remark']
commit_data.loc[:, 'fund_id'] = 'F' + '0' * (6 - len(str(
sql_number))) + str(sql_number)
commit_data.to_sql('fund_info', con, if_exists='append', index=
False)
print('else')
def df_to_sql_T_3(filefullpath, sheet, row_name):
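    """Load the transposed manager sheet (one manager per column), assign
    M-prefixed user_ids, and either update existing manager_info rows matched
    by user_name or append new ones to the SQLite database."""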
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how='all')
excel_df = excel_df.dropna(axis=1, how='all')
excel_df = excel_df.T
excel_df.columns = excel_df.loc[row_name]
excel_df = excel_df.drop(row_name, axis=0, inplace=False)
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★姓名'], inplace=True)
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3')
sql = 'SELECT * FROM manager_info'
sql_df = pd.read_sql(sql, con)
user_list = sql_df['user_name'].tolist()
sql_number = len(user_list)
user_id_number = 0
for user_name in sql_df['user_name'].unique():
user_id_number = user_id_number + 1
user_id = 'M' + '0' * (5 - len(str(user_id_number))) + str(
user_id_number)
with con:
cur = con.cursor()
cur.execute('UPDATE manager_info SET user_id=? WHERE user_name=?',
(user_id, user_name))
excel_name_list = excel_df['★姓名'].tolist()
for name in excel_name_list:
if name in user_list:
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3'
)
sql = 'SELECT * FROM manager_info'
sql_df = pd.read_sql(sql, con)
name_dataframe = sql_df[sql_df['user_name'] == name]
user_id = name_dataframe.loc[name_dataframe.last_valid_index(),
'user_id']
commit_data = excel_df[excel_df['★姓名'] == name]
commit_data.columns = ['user_name', 'sex', 'org_name',
'introduction', 'photo', 'entry_date', 'investment_years',
'education', 'duty', 'qualification', 'background',
'is_fund_qualification', 'is_core_member', 'resume',
'max_asset_mgt_scale', 'prize', 'remark']
commit_data['user_id'] = str(user_id)
user_name = str(name)
sex = str(commit_data.loc[commit_data.user_name == name, 'sex']
.values[0])
org_name = str(commit_data.loc[commit_data.user_name == name,
'org_name'].values[0])
introduction = str(commit_data.loc[commit_data.user_name ==
name, 'introduction'].values[0])
photo = str(commit_data.loc[commit_data.user_name == name,
'photo'].values[0])
entry_date = str(commit_data.loc[commit_data.user_name == name,
'entry_date'].values[0])
investment_years = str(commit_data.loc[commit_data.user_name ==
name, 'investment_years'].values[0])
education = str(commit_data.loc[commit_data.user_name == name,
'education'].values[0])
duty = str(commit_data.loc[commit_data.user_name == name,
'duty'].values[0])
qualification = str(commit_data.loc[commit_data.user_name ==
name, 'qualification'].values[0])
background = str(commit_data.loc[commit_data.user_name == name,
'background'].values[0])
is_fund_qualification = str(commit_data.loc[commit_data.
user_name == name, 'is_fund_qualification'].values[0])
is_core_member = str(commit_data.loc[commit_data.user_name ==
name, 'is_core_member'].values[0])
resume = str(commit_data.loc[commit_data.user_name == name,
'resume'].values[0])
max_asset_mgt_scale = str(commit_data.loc[commit_data.user_name ==
name, 'max_asset_mgt_scale'].values[0])
prize = str(commit_data.loc[commit_data.user_name == name,
'prize'].values[0])
remark = str(commit_data.loc[commit_data.user_name == name,
'remark'].values[0])
with con:
cur = con.cursor()
sql = (
'UPDATE manager_info SET user_name=?, sex=?, org_name=?, introduction=?, photo=?, entry_date=?, investment_years=?, education=?, duty=?, qualification=?, background=?, is_fund_qualification=?, is_core_member=?, resume=?, max_asset_mgt_scale=?, prize=?, remark=? WHERE user_id=?'
)
l = (user_name, sex, org_name, introduction, photo,
entry_date, investment_years, education, duty,
qualification, background, is_fund_qualification,
is_core_member, resume, max_asset_mgt_scale, prize,
remark, user_id)
cur.execute(sql, l)
print('if')
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df['★姓名'] == name]
commit_data.columns = ['user_name', 'sex', 'org_name',
'introduction', 'photo', 'entry_date', 'investment_years',
'education', 'duty', 'qualification', 'background',
'is_fund_qualification', 'is_core_member', 'resume',
'max_asset_mgt_scale', 'prize', 'remark']
commit_data.loc[:, 'user_id'] = 'M' + '0' * (5 - len(str(
sql_number))) + str(sql_number)
commit_data.to_sql('manager_info', con, if_exists='append',
index=False)
print('else')
def df_to_sql_4(filefullpath, sheet, row_name):
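    """Load the NAV sheet, map fund names to their fund_ids from fund_info,
    then update fund_nav_data rows that already exist for a given
    (fund_name, statistic_date) pair and append the rest."""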
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how='all')
excel_df[row_name] = excel_df[row_name].ffill()
excel_df.index = range(len(excel_df))
print(excel_df)
con = sqlite3.connect(
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3')
sql = 'SELECT * FROM fund_nav_data'
sql_df = pd.read_sql(sql, con)
name_list = sql_df['fund_name'].tolist()
date_list = sql_df['statistic_date'].tolist()
print('name_list')
print(name_list)
print('date_list')
print(date_list)
for fund_name in sql_df['fund_name'].unique():
sql = 'SELECT * FROM fund_info'
fund_info_sql_df = pd.read_sql(sql, con)
fund_id = fund_info_sql_df.loc[fund_info_sql_df.fund_name ==
fund_name, 'fund_id'].values[0]
with con:
cur = con.cursor()
cur.execute('UPDATE fund_nav_data SET fund_id=? WHERE fund_name=?',
(fund_id, fund_name))
excel_name_list = excel_df['基金简称'].tolist()
excel_name_list = list(set(excel_name_list))
print('excel_name_list')
print(excel_name_list)
for name in excel_name_list:
statistic_date_series = excel_df.loc[excel_df['基金简称'] == name, '净值日期']
excel_date_list = statistic_date_series.tolist()
excel_date_list = [str(i) for i in excel_date_list]
print('excel_date_list')
print(excel_date_list)
for date in excel_date_list:
if name in name_list and date in date_list:
commit_data = excel_df[excel_df['基金简称'] == name]
print(commit_data.columns)
commit_data.columns = ['fund_name', 'statistic_date', 'nav',
'added_nav', 'total_share', 'total_asset', 'total_nav',
'is_split', 'is_open_date', 'split_ratio',
'after_tax_bonus']
commit_data['fund_id'] = str(fund_id)
fund_name = name
statistic_date = str(date)
nav = str(commit_data.loc[commit_data.statistic_date ==
date, 'nav'].values[0])
added_nav = str(commit_data.loc[commit_data.statistic_date ==
date, 'added_nav'].values[0])
total_share = str(commit_data.loc[commit_data.
statistic_date == date, 'total_share'].values[0])
total_asset = str(commit_data.loc[commit_data.
statistic_date == date, 'total_asset'].values[0])
total_nav = str(commit_data.loc[commit_data.statistic_date ==
date, 'total_nav'].values[0])
is_split = str(commit_data.loc[commit_data.statistic_date ==
date, 'is_split'].values[0])
is_open_date = str(commit_data.loc[commit_data.
statistic_date == date, 'is_open_date'].values[0])
split_ratio = str(commit_data.loc[commit_data.
statistic_date == date, 'split_ratio'].values[0])
after_tax_bonus = str(commit_data.loc[commit_data.
statistic_date == date, 'after_tax_bonus'].values[0])
with con:
cur = con.cursor()
sql = (
'UPDATE fund_nav_data SET nav=?, added_nav=?, total_share=?, total_asset=?, total_nav=?, is_split=?, is_open_date=?, split_ratio=?, after_tax_bonus=? WHERE fund_name=? AND statistic_date=?'
)
l = (nav, added_nav, total_share, total_asset,
total_nav, is_split, is_open_date, split_ratio,
after_tax_bonus, fund_name, statistic_date)
cur.execute(sql, l)
print('if')
else:
commit_data = excel_df[(excel_df['基金简称'] == name) & (
excel_df['净值日期'] == date)]
commit_data.columns = ['fund_name', 'statistic_date', 'nav',
'added_nav', 'total_share', 'total_asset', 'total_nav',
'is_split', 'is_open_date', 'split_ratio',
'after_tax_bonus']
commit_data.to_sql('fund_nav_data', con, if_exists='append',
index=False)
print('else')
def listing(request):
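    """Handle the Excel upload form: for a logged-in user with a valid form,
    save the uploaded file, then parse sheets 1-4 into org_info, fund_info,
    manager_info and fund_nav_data via the df_to_sql_* helpers."""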
context = {}
if request.method == 'POST':
uf = UserForm(request.POST, request.FILES)
if request.user.username and uf.is_valid():
user_upload_file = uf.cleaned_data['user_upload_file']
profile = UserProfile()
profile.username = request.user.username
profile.user_upload_file = user_upload_file
profile.save()
file_name = request.FILES.get('user_upload_file').name
path = (
'C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\upload\\upload\\'
)
filefullpath = path + file_name
if user_upload_file:
b = xlrd.open_workbook(filefullpath)
for sheet in range(1, 5):
if sheet == 1:
row_name = '公司资料简介'
df_to_sql_T_1(filefullpath, sheet, row_name)
if sheet == 2:
row_name = '基金简介'
df_to_sql_T_2(filefullpath, sheet, row_name)
if sheet == 3:
row_name = '人员简介'
df_to_sql_T_3(filefullpath, sheet, row_name)
if sheet == 4:
row_name = '基金简称'
df_to_sql_4(filefullpath, sheet, row_name)
return HttpResponse('upload ok!')
else:
return redirect(to='login')
else:
uf = UserForm()
context['uf'] = uf
return render(request, 'website/templates/listing.html', context)
def index_login(request):
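    """Render the login form on GET and authenticate on POST, redirecting to
    the listing view on success."""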
context = {}
if request.method == 'GET':
form = AuthenticationForm
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
login(request, form.get_user())
return redirect(to='list')
context['form'] = form
return render(request, 'register_login.html', context)
def index_register(request):
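    """Render the registration form on GET and create the account on POST,
    redirecting to the login view on success."""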
context = {}
if request.method == 'GET':
form = UserCreationForm
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect(to='login')
context['form'] = form
return render(request, 'register_login.html', context)
from django.shortcuts import render, Http404, HttpResponse, redirect
from django.contrib.auth import authenticate, login
from website.form import UserForm
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from website.models import UserProfile
from website.form import UserForm
import pandas as pd
from pandas import DataFrame
from sqlalchemy import create_engine
from django.contrib.auth.decorators import login_required
import sqlite3
import xlrd
import uuid
def df_to_sql_T_1(filefullpath, sheet, row_name):  # filefullpath: file path; sheet: sheet index; row_name: row used as the column headers
    # Read the Excel file stored in the folder
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how="all")
excel_df = excel_df.dropna(axis=1, how="all")
excel_df = excel_df.T
excel_df.columns = excel_df.loc[row_name]
excel_df = excel_df.drop(row_name, axis=0, inplace=False)
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★机构全名'], inplace=True)
    # Read the existing table from the database
con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
sql = "SELECT * FROM org_info"#!!!注意sql中没有表格会出错
sql_df = pd.read_sql(sql, con)
fund_name_list = sql_df['org_full_name'].tolist()
sql_number = len(fund_name_list)
    # Assign an id to every existing row in the database, one by one
org_id_number = 0
for org_full_name in sql_df['org_full_name'].unique():
org_id_number = org_id_number+1
org_id = 'O'+'0'*(5-len(str(org_id_number)))+str(org_id_number)
with con:
cur = con.cursor()
cur.execute("""UPDATE org_info SET org_id=? WHERE org_full_name=?""", (org_id, org_full_name))
    # Read the Excel data
#excel_data = pd.read_excel(filefullpath, sheetname=sheet)
excel_name_list = excel_df['★机构全名'].tolist()
for name in excel_name_list:
if name in fund_name_list:
            # Get the id of the database row whose org_full_name equals name
con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
sql = "SELECT * FROM org_info"
sql_df = pd.read_sql(sql, con)
name_dataframe =sql_df[sql_df["org_full_name"] == name]
org_id = name_dataframe.loc[name_dataframe.last_valid_index(), 'org_id']
            # Turn the matching Excel row into a DataFrame, attach the id, and upload it to the database
commit_data = excel_df[excel_df["★机构全名"] == name]
commit_data.columns = ["org_name", "org_full_name", "reg_code", "reg_time", "found_date", "reg_capital",
"real_capital", "region", "profile", "address", "team", "fund_num",
"is_qualification", "prize", "team_scale", "investment_idea", "master_strategy",
"remark", "asset_mgt_scale", "linkman", "linkman_duty", "linkman_phone",
"linkman_email"]
commit_data["org_id"] = str(org_id)
            # Extract the individual field values from the single-row DataFrame
org_name = str(commit_data.loc[commit_data.org_full_name == name, 'org_name'].values[0])
org_full_name = str(name)
reg_code = str(commit_data.loc[commit_data.org_full_name == name, 'reg_code'].values[0])
reg_time = str(commit_data.loc[commit_data.org_full_name == name, 'reg_time'].values[0])
found_date = str(commit_data.loc[commit_data.org_full_name == name, 'found_date'].values[0])
reg_capital = str(commit_data.loc[commit_data.org_full_name == name, 'reg_capital'].values[0])
real_capital = str(commit_data.loc[commit_data.org_full_name == name, 'real_capital'].values[0])
region = str(commit_data.loc[commit_data.org_full_name == name, 'region'].values[0])
profile = str(commit_data.loc[commit_data.org_full_name == name, 'profile'].values[0])
address = str(commit_data.loc[commit_data.org_full_name == name, 'address'].values[0])
            team = str(commit_data.loc[commit_data.org_full_name == name, 'team'].values[0])
            fund_num = str(commit_data.loc[commit_data.org_full_name == name, 'fund_num'].values[0])
is_qualification = str(commit_data.loc[commit_data.org_full_name == name, 'is_qualification'].values[0])
prize = str(commit_data.loc[commit_data.org_full_name == name, 'prize'].values[0])
            team_scale = str(commit_data.loc[commit_data.org_full_name == name, 'team_scale'].values[0])
investment_idea = str(commit_data.loc[commit_data.org_full_name == name, 'investment_idea'].values[0])
master_strategy = str(commit_data.loc[commit_data.org_full_name == name, 'master_strategy'].values[0])
remark = str(commit_data.loc[commit_data.org_full_name == name, 'remark'].values[0])
asset_mgt_scale = str(commit_data.loc[commit_data.org_full_name == name, 'asset_mgt_scale'].values[0])
linkman = str(commit_data.loc[commit_data.org_full_name == name, 'linkman'].values[0])
linkman_duty = str(commit_data.loc[commit_data.org_full_name == name, 'linkman_duty'].values[0])
linkman_phone = str(commit_data.loc[commit_data.org_full_name == name, 'linkman_phone'].values[0])
linkman_email = str(commit_data.loc[commit_data.org_full_name == name, 'linkman_email'].values[0])
with con:
cur = con.cursor()
sql = """UPDATE org_info SET org_name=?, org_full_name=?, reg_code=?, reg_time=?, found_date=?, \
reg_capital=?, real_capital=?, region=?,profile=?, address=?, team=?, fund_num=?, is_qualification=?, \
prize=?, team_scale=?, investment_idea=?, master_strategy=?, remark=?, asset_mgt_scale=?, linkman=?, \
linkman_duty=?, linkman_phone=?, linkman_email=? WHERE org_id=?"""
l = (org_name, org_full_name, reg_code, reg_time, found_date, reg_capital, real_capital, region, profile,\
address, team, fund_num, is_qualification, prize, team_scale, investment_idea, master_strategy, remark,\
asset_mgt_scale, linkman, linkman_duty, linkman_phone, linkman_email, org_id)
cur.execute(sql, l)
print("if")
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df["★机构全名"] == name]
commit_data.columns = ["org_name", "org_full_name", "reg_code", "reg_time", "found_date", "reg_capital",
"real_capital", "region", "profile", "address", "team", "fund_num",
"is_qualification", "prize", "team_scale", "investment_idea", "master_strategy",
"remark", "asset_mgt_scale", "linkman", "linkman_duty", "linkman_phone",
"linkman_email"]
commit_data.loc[:, "org_id"] = 'O'+'0'*(5-len(str(sql_number)))+str(sql_number)
commit_data.to_sql("org_info", con, if_exists="append", index=False)
print("else")
def df_to_sql_T_2(filefullpath, sheet, row_name):  # filefullpath: workbook path; sheet: sheet index; row_name: row promoted to column headers
    # Read the uploaded Excel sheet
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how="all")
excel_df = excel_df.dropna(axis=1, how="all")
excel_df = excel_df.T
excel_df.columns = excel_df.loc[row_name]
excel_df = excel_df.drop(row_name, axis=0, inplace=False)
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★基金全称'], inplace=True)
    # Read the existing fund rows from the database
    con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
    sql = "SELECT * FROM fund_info"  # NOTE: this fails if the fund_info table does not exist yet
sql_df = pd.read_sql(sql, con)
fund_name_list = sql_df['fund_full_name'].tolist()#list
sql_number = len(fund_name_list)
    # Assign a zero-padded fund_id to every existing row in the database
fund_id_number = 0
for fund_full_name in sql_df['fund_full_name'].unique():
fund_id_number = fund_id_number+1
fund_id = 'F'+'0'*(6-len(str(fund_id_number)))+str(fund_id_number)
with con:
cur = con.cursor()
cur.execute("""UPDATE fund_info SET fund_id=? WHERE fund_full_name=?""", (fund_id, fund_full_name))
    # Walk through every fund name found in the Excel sheet
excel_name_list = excel_df['★基金全称'].tolist()#list
for name in excel_name_list:
if name in fund_name_list:
            # Look up the fund_id of the existing row whose fund_full_name equals name
con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
sql = "SELECT * FROM fund_info"
sql_df = pd.read_sql(sql, con)
name_dataframe =sql_df[sql_df["fund_full_name"] == name]
fund_id = name_dataframe.loc[name_dataframe.last_valid_index(), 'fund_id']
            # Turn the matching Excel row into a DataFrame, attach the id, and push the values back to the database
commit_data = excel_df[excel_df["★基金全称"] == name]
commit_data.columns = ["group", "fund_type_strategy", "reg_code", "foundation_date", "fund_name",
"fund_full_name", "fund_manager", "fund_manager_nominal", "fund_stockbroker",
"fund_custodian", "fund_member", "fund_type_issuance", "fund_type_structure",
"fund_structure", "issue_scale", "asset_scale", "is_main_fund", "fee_pay",
"open_date", "locked_time_limit", "duration", "fee_manage", "fee_pay_remark",
"fee_redeem", "fee_subscription", "fee_trust", "investment_range",
"min_purchase_amount", "min_append_amount", "stop_line", "alert_line",
"manager_participation_scale", "investment_idea", "structure_hierarchy", "remark"]
commit_data["fund_id"] = str(fund_id)
            # Extract the individual field values from the single-row DataFrame
group = str(commit_data.loc[commit_data.fund_full_name == name, 'group'].values[0])
fund_type_strategy = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_type_strategy'].values[0])
reg_code = str(commit_data.loc[commit_data.fund_full_name == name, 'reg_code'].values[0])
foundation_date = str(commit_data.loc[commit_data.fund_full_name == name, 'foundation_date'].values[0])
fund_name = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_name'].values[0])
fund_full_name = str(name)
fund_manager = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_manager'].values[0])
fund_manager_nominal = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_manager_nominal'].values[0])
fund_stockbroker = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_stockbroker'].values[0])
fund_custodian = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_custodian'].values[0])
fund_member = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_member'].values[0])
fund_type_issuance = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_type_issuance'].values[0])
fund_type_structure = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_type_structure'].values[0])
fund_structure = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_structure'].values[0])
issue_scale = str(commit_data.loc[commit_data.fund_full_name == name, 'issue_scale'].values[0])
asset_scale = str(commit_data.loc[commit_data.fund_full_name == name, 'asset_scale'].values[0])
is_main_fund = str(commit_data.loc[commit_data.fund_full_name == name, 'is_main_fund'].values[0])
fee_pay = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_pay'].values[0])
            open_date = str(commit_data.loc[commit_data.fund_full_name == name, 'open_date'].values[0])
locked_time_limit = str(commit_data.loc[commit_data.fund_full_name == name, 'locked_time_limit'].values[0])
duration = str(commit_data.loc[commit_data.fund_full_name == name, 'duration'].values[0])
fee_manage = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_manage'].values[0])
fee_pay_remark = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_pay_remark'].values[0])
fee_redeem = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_redeem'].values[0])
fee_subscription = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_subscription'].values[0])
fee_trust = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_trust'].values[0])
investment_range = str(commit_data.loc[commit_data.fund_full_name == name, 'investment_range'].values[0])
min_purchase_amount = str(commit_data.loc[commit_data.fund_full_name == name, 'min_purchase_amount'].values[0])
min_append_amount = str(commit_data.loc[commit_data.fund_full_name == name, 'min_append_amount'].values[0])
stop_line = str(commit_data.loc[commit_data.fund_full_name == name, 'stop_line'].values[0])
alert_line = str(commit_data.loc[commit_data.fund_full_name == name, 'alert_line'].values[0])
manager_participation_scale = str(commit_data.loc[commit_data.fund_full_name == name, 'manager_participation_scale'].values[0])
investment_idea = str(commit_data.loc[commit_data.fund_full_name == name, 'investment_idea'].values[0])
structure_hierarchy = str(commit_data.loc[commit_data.fund_full_name == name, 'structure_hierarchy'].values[0])
remark = str(commit_data.loc[commit_data.fund_full_name == name, 'remark'].values[0])
with con:
cur = con.cursor()
sql = """UPDATE fund_info SET 'group'=?, fund_type_strategy=?, reg_code=?, foundation_date=?, fund_name=?,\
fund_full_name=?, fund_manager=?, fund_manager_nominal=?, fund_stockbroker=?, fund_custodian=?, fund_member=?,\
fund_type_issuance=?, fund_type_structure=?, fund_structure=?, issue_scale=?, asset_scale=?, is_main_fund=?, fee_pay=?,\
open_date=?, locked_time_limit=?, duration=?, fee_manage=?, fee_pay_remark=?, fee_redeem=?, fee_subscription=?, fee_trust=?,\
investment_range=?, min_purchase_amount=?, min_append_amount=?, stop_line=?, alert_line=?, manager_participation_scale=?, \
investment_idea=?, structure_hierarchy=?, remark=? WHERE fund_id=?"""
l = (group, fund_type_strategy, reg_code, foundation_date, fund_name, fund_full_name, fund_manager, \
fund_manager_nominal, fund_stockbroker, fund_custodian, fund_member, fund_type_issuance, \
fund_type_structure, fund_structure, issue_scale, asset_scale, is_main_fund, fee_pay, open_date, \
locked_time_limit, duration, fee_manage, fee_pay_remark, fee_redeem, fee_subscription, fee_trust, \
investment_range, min_purchase_amount, min_append_amount, stop_line, alert_line, manager_participation_scale, \
investment_idea, structure_hierarchy, remark, fund_id)
cur.execute(sql, l)
print("if")
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df["★基金全称"] == name]
commit_data.columns = ["group", "fund_type_strategy", "reg_code", "foundation_date", "fund_name", "fund_full_name", \
"fund_manager", "fund_manager_nominal", "fund_stockbroker", "fund_custodian", "fund_member", \
"fund_type_issuance", "fund_type_structure", "fund_structure", "issue_scale", "asset_scale", \
"is_main_fund", "fee_pay", "open_date", "locked_time_limit", "duration", "fee_manage", \
"fee_pay_remark", "fee_redeem", "fee_subscription", "fee_trust", "investment_range", \
"min_purchase_amount", "min_append_amount", "stop_line", "alert_line", "manager_participation_scale", \
"investment_idea", "structure_hierarchy", "remark"]
commit_data.loc[:, "fund_id"] = 'F'+'0'*(6-len(str(sql_number)))+str(sql_number)
commit_data.to_sql("fund_info", con, if_exists="append", index=False)
print("else")
def df_to_sql_T_3(filefullpath, sheet, row_name):  # filefullpath: workbook path; sheet: sheet index; row_name: row promoted to column headers
    # Read the uploaded Excel sheet
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how="all")
excel_df = excel_df.dropna(axis=1, how="all")
excel_df = excel_df.T
    excel_df.columns = excel_df.loc[row_name]  # promote the "人员简介" row to the column headers
    excel_df = excel_df.drop(row_name, axis=0, inplace=False)  # drop the "人员简介" header row itself
excel_df.index = range(len(excel_df))
excel_df.drop_duplicates(subset=['★姓名'], inplace=True)
    # Read the existing manager rows from the database
    con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
    sql = "SELECT * FROM manager_info"  # NOTE: this fails if the manager_info table does not exist yet
sql_df = pd.read_sql(sql, con)
user_list = sql_df['user_name'].tolist()#list
sql_number = len(user_list)
    # Assign a zero-padded user_id to every existing row in the database
user_id_number = 0
for user_name in sql_df['user_name'].unique():
user_id_number = user_id_number+1
user_id = 'M'+'0'*(5-len(str(user_id_number)))+str(user_id_number)
with con:
cur = con.cursor()
cur.execute("""UPDATE manager_info SET user_id=? WHERE user_name=?""", (user_id, user_name))
    # Walk through every manager name found in the Excel sheet
excel_name_list = excel_df['★姓名'].tolist()#list
for name in excel_name_list:
if name in user_list:
            # Look up the user_id of the existing row whose user_name equals name
con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
sql = "SELECT * FROM manager_info"
sql_df = pd.read_sql(sql, con)
name_dataframe =sql_df[sql_df["user_name"] == name]
            user_id = name_dataframe.loc[name_dataframe.last_valid_index(), 'user_id']  # take the user_id from the last valid matching row
            # Turn the matching Excel row into a DataFrame, attach the id, and push the values back to the database
commit_data = excel_df[excel_df["★姓名"] == name]
commit_data.columns = ["user_name", "sex", "org_name", "introduction", "photo", "entry_date",
"investment_years", "education", "duty", "qualification", "background", "is_fund_qualification",
"is_core_member", "resume", "max_asset_mgt_scale", "prize", "remark"]
commit_data["user_id"] = str(user_id)#不需要
#把一行表格dataframe提取其中的值
user_name = str(name)
sex = str(commit_data.loc[commit_data.user_name == name, 'sex'].values[0])
org_name = str(commit_data.loc[commit_data.user_name == name, 'org_name'].values[0])
introduction = str(commit_data.loc[commit_data.user_name == name, 'introduction'].values[0])
photo = str(commit_data.loc[commit_data.user_name == name, 'photo'].values[0])
entry_date = str(commit_data.loc[commit_data.user_name == name, 'entry_date'].values[0])
investment_years = str(commit_data.loc[commit_data.user_name == name, 'investment_years'].values[0])
education = str(commit_data.loc[commit_data.user_name == name, 'education'].values[0])
duty = str(commit_data.loc[commit_data.user_name == name, 'duty'].values[0])
qualification = str(commit_data.loc[commit_data.user_name == name, 'qualification'].values[0])
background = str(commit_data.loc[commit_data.user_name == name, 'background'].values[0])
is_fund_qualification = str(commit_data.loc[commit_data.user_name == name, 'is_fund_qualification'].values[0])
is_core_member = str(commit_data.loc[commit_data.user_name == name, 'is_core_member'].values[0])
resume = str(commit_data.loc[commit_data.user_name == name, 'resume'].values[0])
max_asset_mgt_scale = str(commit_data.loc[commit_data.user_name == name, 'max_asset_mgt_scale'].values[0])
prize = str(commit_data.loc[commit_data.user_name == name, 'prize'].values[0])
remark = str(commit_data.loc[commit_data.user_name == name, 'remark'].values[0])
with con:
cur = con.cursor()
sql = """UPDATE manager_info SET user_name=?, sex=?, org_name=?, introduction=?, photo=?, \
entry_date=?, investment_years=?, education=?, duty=?, qualification=?, background=?, is_fund_qualification=?, \
is_core_member=?, resume=?, max_asset_mgt_scale=?, prize=?, remark=? WHERE user_id=?"""
l = (user_name, sex, org_name, introduction, photo, entry_date, investment_years, education, \
duty, qualification, background, is_fund_qualification, is_core_member, resume, max_asset_mgt_scale, \
prize, remark, user_id)
cur.execute(sql, l)
print("if")
else:
sql_number = sql_number + 1
commit_data = excel_df[excel_df["★姓名"] == name]
commit_data.columns = ["user_name", "sex", "org_name", "introduction", "photo", "entry_date", \
"investment_years", "education", "duty", "qualification", "background", \
"is_fund_qualification", "is_core_member", "resume", "max_asset_mgt_scale", "prize", \
"remark"]
commit_data.loc[:, "user_id"] = 'M'+'0'*(5-len(str(sql_number)))+str(sql_number)
commit_data.to_sql("manager_info", con, if_exists="append", index=False)
print("else")
def df_to_sql_4(filefullpath, sheet, row_name):
    # Read and pre-process the uploaded Excel sheet
excel_df = pd.read_excel(filefullpath, sheetname=sheet)
excel_df = excel_df.dropna(how="all")
#excel_df = excel_df.dropna(axis=1, how="all")
excel_df[row_name] = excel_df[row_name].ffill()
excel_df.index = range(len(excel_df))
print(excel_df)
    # Read the existing NAV rows from the database
con = sqlite3.connect(r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\db.sqlite3")
sql = "SELECT * FROM fund_nav_data"
sql_df = pd.read_sql(sql, con)
name_list = sql_df['fund_name'].tolist()
date_list = sql_df['statistic_date'].tolist()
print("name_list")
#print(type(name_list[0]))
print(name_list)
print("date_list")
#print(type(date_list[0]))
print(date_list)
    # Pull each fund_id from the fund_info table and write it into the matching fund_nav_data rows
for fund_name in sql_df['fund_name'].unique():
sql = "SELECT * FROM fund_info"
fund_info_sql_df = pd.read_sql(sql, con)
fund_id = fund_info_sql_df.loc[fund_info_sql_df.fund_name == fund_name, 'fund_id'].values[0]
with con:
cur = con.cursor()
cur.execute("""UPDATE fund_nav_data SET fund_id=? WHERE fund_name=?""", (fund_id, fund_name))
    # Walk through every fund short name found in the Excel sheet
excel_name_list = excel_df['基金简称'].tolist()
excel_name_list = list(set(excel_name_list))
print("excel_name_list")
#print(type(excel_name_list[0]))
print(excel_name_list)
for name in excel_name_list:
statistic_date_series = excel_df.loc[excel_df['基金简称'] == name, '净值日期']
excel_date_list = statistic_date_series.tolist()
excel_date_list = [str(i) for i in excel_date_list]
print("excel_date_list")
#print(type(excel_date_list[0]))
print(excel_date_list)
for date in excel_date_list:
if name in name_list and date in date_list:
                commit_data = excel_df[excel_df['基金简称'] == name]
                print(commit_data.columns)
                commit_data.columns = ["fund_name", "statistic_date", "nav", "added_nav", "total_share", "total_asset", "total_nav", "is_split", "is_open_date", "split_ratio", "after_tax_bonus"]
                # Stringify the dates the same way excel_date_list was built so the comparisons below match
                commit_data["statistic_date"] = commit_data["statistic_date"].apply(str)
fund_name = name
statistic_date = str(date)
nav = str(commit_data.loc[commit_data.statistic_date == date, 'nav'].values[0])
added_nav = str(commit_data.loc[commit_data.statistic_date == date, 'added_nav'].values[0])
total_share = str(commit_data.loc[commit_data.statistic_date == date, 'total_share'].values[0])
total_asset = str(commit_data.loc[commit_data.statistic_date == date, 'total_asset'].values[0])
total_nav = str(commit_data.loc[commit_data.statistic_date == date, 'total_nav'].values[0])
is_split = str(commit_data.loc[commit_data.statistic_date == date, 'is_split'].values[0])
is_open_date = str(commit_data.loc[commit_data.statistic_date == date, 'is_open_date'].values[0])
split_ratio = str(commit_data.loc[commit_data.statistic_date == date, 'split_ratio'].values[0])
after_tax_bonus = str(commit_data.loc[commit_data.statistic_date == date, 'after_tax_bonus'].values[0])
with con:
cur = con.cursor()
sql = """UPDATE fund_nav_data SET nav=?, added_nav=?, total_share=?, total_asset=?, total_nav=?, is_split=?, is_open_date=?, split_ratio=?, after_tax_bonus=? WHERE fund_name=? AND statistic_date=?"""
l = (nav, added_nav, total_share, total_asset, total_nav, is_split, is_open_date, split_ratio, after_tax_bonus, fund_name, statistic_date)
cur.execute(sql, l)
print("if")
else:
commit_data = excel_df[(excel_df["基金简称"] == name)&(excel_df["净值日期"] == date)]
commit_data.columns = ["fund_name", "statistic_date", "nav", "added_nav", "total_share", "total_asset", "total_nav", "is_split", "is_open_date", "split_ratio", "after_tax_bonus"]
commit_data.to_sql("fund_nav_data", con, if_exists="append", index=False)
print("else")
def listing(request):
context = {}
if request.method == "POST":
uf = UserForm(request.POST, request.FILES)
if request.user.username and uf.is_valid():
#username = uf.cleaned_data['username']
user_upload_file = uf.cleaned_data['user_upload_file']
            # Save the upload record to the database
profile = UserProfile()
profile.username = request.user.username
profile.user_upload_file = user_upload_file
profile.save()
file_name = request.FILES.get('user_upload_file').name
path = "C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\upload\\upload\\"
            # e.g. C:\Users\K\Desktop\excel-upload-sqlite3\mins\upload\upload\华泰大赛参赛私募基金数据填报模板.xlsx
filefullpath = path + file_name
#print(filefullpath)
if user_upload_file:
b = xlrd.open_workbook(filefullpath)
                # count = len(b.sheets())  # not needed: the number of sheets is fixed
for sheet in range(1, 5):
if sheet == 1:
row_name = "公司资料简介"
df_to_sql_T_1(filefullpath, sheet, row_name)
if sheet == 2:
row_name = "基金简介"
df_to_sql_T_2(filefullpath, sheet, row_name)
if sheet == 3:
row_name = "人员简介"
df_to_sql_T_3(filefullpath, sheet, row_name)
if sheet == 4:
row_name = "基金简称"
df_to_sql_4(filefullpath, sheet, row_name)
return HttpResponse('upload ok!')
else:
return redirect(to='login')
else:
uf = UserForm()
context['uf'] = uf
return render(request, 'website/templates/listing.html', context)
def index_login(request):
context = {}
if request.method == "GET":
form = AuthenticationForm
if request.method == "POST":
form = AuthenticationForm(data=request.POST)
if form.is_valid():
login(request, form.get_user())
return redirect(to='list')
context['form'] = form
return render(request, 'register_login.html', context)
def index_register(request):
context = {}
if request.method == 'GET':
form = UserCreationForm
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect(to='login')
context['form'] = form
return render(request, 'register_login.html', context)
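
# Minimal standalone sketch of how the four loaders are expected to be driven
# outside the Django view, assuming a workbook laid out like the upload
# template (sheet 1 = org info, 2 = fund info, 3 = manager info, 4 = NAV data).
# The sample path is an assumption for illustration only, and running this
# module directly also assumes its Django imports are importable (e.g. a
# configured DJANGO_SETTINGS_MODULE).
if __name__ == "__main__":
    sample_path = r"C:\Users\K\Desktop\excel-upload-sqlite3\mins\upload\upload\sample.xlsx"
    df_to_sql_T_1(sample_path, 1, "公司资料简介")
    df_to_sql_T_2(sample_path, 2, "基金简介")
    df_to_sql_T_3(sample_path, 3, "人员简介")
    df_to_sql_4(sample_path, 4, "基金简称")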
WHERE fund_full_name=?'\n , (fund_id, fund_full_name))\n excel_name_list = excel_df['★基金全称'].tolist()\n for name in excel_name_list:\n if name in fund_name_list:\n con = sqlite3.connect(\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\db.sqlite3'\n )\n sql = 'SELECT * FROM fund_info'\n sql_df = pd.read_sql(sql, con)\n name_dataframe = sql_df[sql_df['fund_full_name'] == name]\n fund_id = name_dataframe.loc[name_dataframe.last_valid_index(),\n 'fund_id']\n commit_data = excel_df[excel_df['★基金全称'] == name]\n commit_data.columns = ['group', 'fund_type_strategy',\n 'reg_code', 'foundation_date', 'fund_name',\n 'fund_full_name', 'fund_manager', 'fund_manager_nominal',\n 'fund_stockbroker', 'fund_custodian', 'fund_member',\n 'fund_type_issuance', 'fund_type_structure',\n 'fund_structure', 'issue_scale', 'asset_scale',\n 'is_main_fund', 'fee_pay', 'open_date', 'locked_time_limit',\n 'duration', 'fee_manage', 'fee_pay_remark', 'fee_redeem',\n 'fee_subscription', 'fee_trust', 'investment_range',\n 'min_purchase_amount', 'min_append_amount', 'stop_line',\n 'alert_line', 'manager_participation_scale',\n 'investment_idea', 'structure_hierarchy', 'remark']\n commit_data['fund_id'] = str(fund_id)\n group = str(commit_data.loc[commit_data.fund_full_name == name,\n 'group'].values[0])\n fund_type_strategy = str(commit_data.loc[commit_data.\n fund_full_name == name, 'fund_type_strategy'].values[0])\n reg_code = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'reg_code'].values[0])\n foundation_date = str(commit_data.loc[commit_data.\n fund_full_name == name, 'foundation_date'].values[0])\n fund_name = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fund_name'].values[0])\n fund_full_name = str(name)\n fund_manager = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fund_manager'].values[0])\n fund_manager_nominal = str(commit_data.loc[commit_data.\n fund_full_name == name, 'fund_manager_nominal'].values[0])\n fund_stockbroker = str(commit_data.loc[commit_data.\n fund_full_name == name, 'fund_stockbroker'].values[0])\n fund_custodian = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fund_custodian'].values[0])\n fund_member = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fund_member'].values[0])\n fund_type_issuance = str(commit_data.loc[commit_data.\n fund_full_name == name, 'fund_type_issuance'].values[0])\n fund_type_structure = str(commit_data.loc[commit_data.\n fund_full_name == name, 'fund_type_structure'].values[0])\n fund_structure = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fund_structure'].values[0])\n issue_scale = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'issue_scale'].values[0])\n asset_scale = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'asset_scale'].values[0])\n is_main_fund = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'is_main_fund'].values[0])\n fee_pay = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fee_pay'].values[0])\n open_date = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'open_date'])\n locked_time_limit = str(commit_data.loc[commit_data.\n fund_full_name == name, 'locked_time_limit'].values[0])\n duration = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'duration'].values[0])\n fee_manage = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fee_manage'].values[0])\n fee_pay_remark = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fee_pay_remark'].values[0])\n fee_redeem = 
str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fee_redeem'].values[0])\n fee_subscription = str(commit_data.loc[commit_data.\n fund_full_name == name, 'fee_subscription'].values[0])\n fee_trust = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fee_trust'].values[0])\n investment_range = str(commit_data.loc[commit_data.\n fund_full_name == name, 'investment_range'].values[0])\n min_purchase_amount = str(commit_data.loc[commit_data.\n fund_full_name == name, 'min_purchase_amount'].values[0])\n min_append_amount = str(commit_data.loc[commit_data.\n fund_full_name == name, 'min_append_amount'].values[0])\n stop_line = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'stop_line'].values[0])\n alert_line = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'alert_line'].values[0])\n manager_participation_scale = str(commit_data.loc[commit_data.\n fund_full_name == name, 'manager_participation_scale'].\n values[0])\n investment_idea = str(commit_data.loc[commit_data.\n fund_full_name == name, 'investment_idea'].values[0])\n structure_hierarchy = str(commit_data.loc[commit_data.\n fund_full_name == name, 'structure_hierarchy'].values[0])\n remark = str(commit_data.loc[commit_data.fund_full_name == name,\n 'remark'].values[0])\n with con:\n cur = con.cursor()\n sql = (\n \"UPDATE fund_info SET 'group'=?, fund_type_strategy=?, reg_code=?, foundation_date=?, fund_name=?, fund_full_name=?, fund_manager=?, fund_manager_nominal=?, fund_stockbroker=?, fund_custodian=?, fund_member=?, fund_type_issuance=?, fund_type_structure=?, fund_structure=?, issue_scale=?, asset_scale=?, is_main_fund=?, fee_pay=?, open_date=?, locked_time_limit=?, duration=?, fee_manage=?, fee_pay_remark=?, fee_redeem=?, fee_subscription=?, fee_trust=?, investment_range=?, min_purchase_amount=?, min_append_amount=?, stop_line=?, alert_line=?, manager_participation_scale=?, investment_idea=?, structure_hierarchy=?, remark=? 
WHERE fund_id=?\"\n )\n l = (group, fund_type_strategy, reg_code, foundation_date,\n fund_name, fund_full_name, fund_manager,\n fund_manager_nominal, fund_stockbroker, fund_custodian,\n fund_member, fund_type_issuance, fund_type_structure,\n fund_structure, issue_scale, asset_scale, is_main_fund,\n fee_pay, open_date, locked_time_limit, duration,\n fee_manage, fee_pay_remark, fee_redeem,\n fee_subscription, fee_trust, investment_range,\n min_purchase_amount, min_append_amount, stop_line,\n alert_line, manager_participation_scale,\n investment_idea, structure_hierarchy, remark, fund_id)\n cur.execute(sql, l)\n print('if')\n else:\n sql_number = sql_number + 1\n commit_data = excel_df[excel_df['★基金全称'] == name]\n commit_data.columns = ['group', 'fund_type_strategy',\n 'reg_code', 'foundation_date', 'fund_name',\n 'fund_full_name', 'fund_manager', 'fund_manager_nominal',\n 'fund_stockbroker', 'fund_custodian', 'fund_member',\n 'fund_type_issuance', 'fund_type_structure',\n 'fund_structure', 'issue_scale', 'asset_scale',\n 'is_main_fund', 'fee_pay', 'open_date', 'locked_time_limit',\n 'duration', 'fee_manage', 'fee_pay_remark', 'fee_redeem',\n 'fee_subscription', 'fee_trust', 'investment_range',\n 'min_purchase_amount', 'min_append_amount', 'stop_line',\n 'alert_line', 'manager_participation_scale',\n 'investment_idea', 'structure_hierarchy', 'remark']\n commit_data.loc[:, 'fund_id'] = 'F' + '0' * (6 - len(str(\n sql_number))) + str(sql_number)\n commit_data.to_sql('fund_info', con, if_exists='append', index=\n False)\n print('else')\n\n\ndef df_to_sql_T_3(filefullpath, sheet, row_name):\n excel_df = pd.read_excel(filefullpath, sheetname=sheet)\n excel_df = excel_df.dropna(how='all')\n excel_df = excel_df.dropna(axis=1, how='all')\n excel_df = excel_df.T\n excel_df.columns = excel_df.loc[row_name]\n excel_df = excel_df.drop(row_name, axis=0, inplace=False)\n excel_df.index = range(len(excel_df))\n excel_df.drop_duplicates(subset=['★姓名'], inplace=True)\n con = sqlite3.connect(\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\db.sqlite3')\n sql = 'SELECT * FROM manager_info'\n sql_df = pd.read_sql(sql, con)\n user_list = sql_df['user_name'].tolist()\n sql_number = len(user_list)\n user_id_number = 0\n for user_name in sql_df['user_name'].unique():\n user_id_number = user_id_number + 1\n user_id = 'M' + '0' * (5 - len(str(user_id_number))) + str(\n user_id_number)\n with con:\n cur = con.cursor()\n cur.execute('UPDATE manager_info SET user_id=? 
WHERE user_name=?',\n (user_id, user_name))\n excel_name_list = excel_df['★姓名'].tolist()\n for name in excel_name_list:\n if name in user_list:\n con = sqlite3.connect(\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\db.sqlite3'\n )\n sql = 'SELECT * FROM manager_info'\n sql_df = pd.read_sql(sql, con)\n name_dataframe = sql_df[sql_df['user_name'] == name]\n user_id = name_dataframe.loc[name_dataframe.last_valid_index(),\n 'user_id']\n commit_data = excel_df[excel_df['★姓名'] == name]\n commit_data.columns = ['user_name', 'sex', 'org_name',\n 'introduction', 'photo', 'entry_date', 'investment_years',\n 'education', 'duty', 'qualification', 'background',\n 'is_fund_qualification', 'is_core_member', 'resume',\n 'max_asset_mgt_scale', 'prize', 'remark']\n commit_data['user_id'] = str(user_id)\n user_name = str(name)\n sex = str(commit_data.loc[commit_data.user_name == name, 'sex']\n .values[0])\n org_name = str(commit_data.loc[commit_data.user_name == name,\n 'org_name'].values[0])\n introduction = str(commit_data.loc[commit_data.user_name ==\n name, 'introduction'].values[0])\n photo = str(commit_data.loc[commit_data.user_name == name,\n 'photo'].values[0])\n entry_date = str(commit_data.loc[commit_data.user_name == name,\n 'entry_date'].values[0])\n investment_years = str(commit_data.loc[commit_data.user_name ==\n name, 'investment_years'].values[0])\n education = str(commit_data.loc[commit_data.user_name == name,\n 'education'].values[0])\n duty = str(commit_data.loc[commit_data.user_name == name,\n 'duty'].values[0])\n qualification = str(commit_data.loc[commit_data.user_name ==\n name, 'qualification'].values[0])\n background = str(commit_data.loc[commit_data.user_name == name,\n 'background'].values[0])\n is_fund_qualification = str(commit_data.loc[commit_data.\n user_name == name, 'is_fund_qualification'].values[0])\n is_core_member = str(commit_data.loc[commit_data.user_name ==\n name, 'is_core_member'].values[0])\n resume = str(commit_data.loc[commit_data.user_name == name,\n 'resume'].values[0])\n max_asset_mgt_scale = str(commit_data.loc[commit_data.user_name ==\n name, 'max_asset_mgt_scale'].values[0])\n prize = str(commit_data.loc[commit_data.user_name == name,\n 'prize'].values[0])\n remark = str(commit_data.loc[commit_data.user_name == name,\n 'remark'].values[0])\n with con:\n cur = con.cursor()\n sql = (\n 'UPDATE manager_info SET user_name=?, sex=?, org_name=?, introduction=?, photo=?, entry_date=?, investment_years=?, education=?, duty=?, qualification=?, background=?, is_fund_qualification=?, is_core_member=?, resume=?, max_asset_mgt_scale=?, prize=?, remark=? 
WHERE user_id=?'\n )\n l = (user_name, sex, org_name, introduction, photo,\n entry_date, investment_years, education, duty,\n qualification, background, is_fund_qualification,\n is_core_member, resume, max_asset_mgt_scale, prize,\n remark, user_id)\n cur.execute(sql, l)\n print('if')\n else:\n sql_number = sql_number + 1\n commit_data = excel_df[excel_df['★姓名'] == name]\n commit_data.columns = ['user_name', 'sex', 'org_name',\n 'introduction', 'photo', 'entry_date', 'investment_years',\n 'education', 'duty', 'qualification', 'background',\n 'is_fund_qualification', 'is_core_member', 'resume',\n 'max_asset_mgt_scale', 'prize', 'remark']\n commit_data.loc[:, 'user_id'] = 'M' + '0' * (5 - len(str(\n sql_number))) + str(sql_number)\n commit_data.to_sql('manager_info', con, if_exists='append',\n index=False)\n print('else')\n\n\ndef df_to_sql_4(filefullpath, sheet, row_name):\n excel_df = pd.read_excel(filefullpath, sheetname=sheet)\n excel_df = excel_df.dropna(how='all')\n excel_df[row_name] = excel_df[row_name].ffill()\n excel_df.index = range(len(excel_df))\n print(excel_df)\n con = sqlite3.connect(\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\db.sqlite3')\n sql = 'SELECT * FROM fund_nav_data'\n sql_df = pd.read_sql(sql, con)\n name_list = sql_df['fund_name'].tolist()\n date_list = sql_df['statistic_date'].tolist()\n print('name_list')\n print(name_list)\n print('date_list')\n print(date_list)\n for fund_name in sql_df['fund_name'].unique():\n sql = 'SELECT * FROM fund_info'\n fund_info_sql_df = pd.read_sql(sql, con)\n fund_id = fund_info_sql_df.loc[fund_info_sql_df.fund_name ==\n fund_name, 'fund_id'].values[0]\n with con:\n cur = con.cursor()\n cur.execute('UPDATE fund_nav_data SET fund_id=? WHERE fund_name=?',\n (fund_id, fund_name))\n excel_name_list = excel_df['基金简称'].tolist()\n excel_name_list = list(set(excel_name_list))\n print('excel_name_list')\n print(excel_name_list)\n for name in excel_name_list:\n statistic_date_series = excel_df.loc[excel_df['基金简称'] == name, '净值日期']\n excel_date_list = statistic_date_series.tolist()\n excel_date_list = [str(i) for i in excel_date_list]\n print('excel_date_list')\n print(excel_date_list)\n for date in excel_date_list:\n if name in name_list and date in date_list:\n commit_data = excel_df[excel_df['基金简称'] == name]\n print(commit_data.columns)\n commit_data.columns = ['fund_name', 'statistic_date', 'nav',\n 'added_nav', 'total_share', 'total_asset', 'total_nav',\n 'is_split', 'is_open_date', 'split_ratio',\n 'after_tax_bonus']\n commit_data['fund_id'] = str(fund_id)\n fund_name = name\n statistic_date = str(date)\n nav = str(commit_data.loc[commit_data.statistic_date ==\n date, 'nav'].values[0])\n added_nav = str(commit_data.loc[commit_data.statistic_date ==\n date, 'added_nav'].values[0])\n total_share = str(commit_data.loc[commit_data.\n statistic_date == date, 'total_share'].values[0])\n total_asset = str(commit_data.loc[commit_data.\n statistic_date == date, 'total_asset'].values[0])\n total_nav = str(commit_data.loc[commit_data.statistic_date ==\n date, 'total_nav'].values[0])\n is_split = str(commit_data.loc[commit_data.statistic_date ==\n date, 'is_split'].values[0])\n is_open_date = str(commit_data.loc[commit_data.\n statistic_date == date, 'is_open_date'].values[0])\n split_ratio = str(commit_data.loc[commit_data.\n statistic_date == date, 'split_ratio'].values[0])\n after_tax_bonus = str(commit_data.loc[commit_data.\n statistic_date == date, 'after_tax_bonus'].values[0])\n with con:\n cur = con.cursor()\n sql = (\n 
'UPDATE fund_nav_data SET nav=?, added_nav=?, total_share=?, total_asset=?, total_nav=?, is_split=?, is_open_date=?, split_ratio=?, after_tax_bonus=? WHERE fund_name=? AND statistic_date=?'\n )\n l = (nav, added_nav, total_share, total_asset,\n total_nav, is_split, is_open_date, split_ratio,\n after_tax_bonus, fund_name, statistic_date)\n cur.execute(sql, l)\n print('if')\n else:\n commit_data = excel_df[(excel_df['基金简称'] == name) & (\n excel_df['净值日期'] == date)]\n commit_data.columns = ['fund_name', 'statistic_date', 'nav',\n 'added_nav', 'total_share', 'total_asset', 'total_nav',\n 'is_split', 'is_open_date', 'split_ratio',\n 'after_tax_bonus']\n commit_data.to_sql('fund_nav_data', con, if_exists='append',\n index=False)\n print('else')\n\n\ndef listing(request):\n context = {}\n if request.method == 'POST':\n uf = UserForm(request.POST, request.FILES)\n if request.user.username and uf.is_valid():\n user_upload_file = uf.cleaned_data['user_upload_file']\n profile = UserProfile()\n profile.username = request.user.username\n profile.user_upload_file = user_upload_file\n profile.save()\n file_name = request.FILES.get('user_upload_file').name\n path = (\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\upload\\\\upload\\\\'\n )\n filefullpath = path + file_name\n if user_upload_file:\n b = xlrd.open_workbook(filefullpath)\n for sheet in range(1, 5):\n if sheet == 1:\n row_name = '公司资料简介'\n df_to_sql_T_1(filefullpath, sheet, row_name)\n if sheet == 2:\n row_name = '基金简介'\n df_to_sql_T_2(filefullpath, sheet, row_name)\n if sheet == 3:\n row_name = '人员简介'\n df_to_sql_T_3(filefullpath, sheet, row_name)\n if sheet == 4:\n row_name = '基金简称'\n df_to_sql_4(filefullpath, sheet, row_name)\n return HttpResponse('upload ok!')\n else:\n return redirect(to='login')\n else:\n uf = UserForm()\n context['uf'] = uf\n return render(request, 'website/templates/listing.html', context)\n\n\ndef index_login(request):\n context = {}\n if request.method == 'GET':\n form = AuthenticationForm\n if request.method == 'POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n login(request, form.get_user())\n return redirect(to='list')\n context['form'] = form\n return render(request, 'register_login.html', context)\n\n\ndef index_register(request):\n context = {}\n if request.method == 'GET':\n form = UserCreationForm\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(to='login')\n context['form'] = form\n return render(request, 'register_login.html', context)\n",
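[Editor's aside — not part of the dumped code.] Each df_to_sql_T_* function in the step above follows the same update-or-append flow: if the key value already exists in the SQLite table it issues a parameterized UPDATE, otherwise it appends the new row with DataFrame.to_sql. A condensed, hedged sketch of that flow is below; the table and column names are placeholders, not the original schema.

import sqlite3
import pandas as pd

def upsert_rows(df, db_path, table, key_col):
    # Update rows whose key already exists; append the rest via to_sql.
    con = sqlite3.connect(db_path)
    existing = pd.read_sql(f"SELECT {key_col} FROM {table}", con)[key_col].tolist()

    to_update = df[df[key_col].isin(existing)]
    to_append = df[~df[key_col].isin(existing)]

    with con:
        cur = con.cursor()
        for _, row in to_update.iterrows():
            cols = [c for c in df.columns if c != key_col]
            assignments = ", ".join(f"{c}=?" for c in cols)
            sql = f"UPDATE {table} SET {assignments} WHERE {key_col}=?"
            cur.execute(sql, [*(row[c] for c in cols), row[key_col]])

    if not to_append.empty:
        to_append.to_sql(table, con, if_exists="append", index=False)
    con.close()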
"step-4": "from django.shortcuts import render, Http404, HttpResponse, redirect\nfrom django.contrib.auth import authenticate, login\nfrom website.form import UserForm\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom website.models import UserProfile\nfrom website.form import UserForm\nimport pandas as pd\nfrom pandas import DataFrame\nfrom sqlalchemy import create_engine\nfrom django.contrib.auth.decorators import login_required\nimport sqlite3\nimport xlrd\nimport uuid\n\n\ndef df_to_sql_T_1(filefullpath, sheet, row_name):\n excel_df = pd.read_excel(filefullpath, sheetname=sheet)\n excel_df = excel_df.dropna(how='all')\n excel_df = excel_df.dropna(axis=1, how='all')\n excel_df = excel_df.T\n excel_df.columns = excel_df.loc[row_name]\n excel_df = excel_df.drop(row_name, axis=0, inplace=False)\n excel_df.index = range(len(excel_df))\n excel_df.drop_duplicates(subset=['★机构全名'], inplace=True)\n con = sqlite3.connect(\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\db.sqlite3')\n sql = 'SELECT * FROM org_info'\n sql_df = pd.read_sql(sql, con)\n fund_name_list = sql_df['org_full_name'].tolist()\n sql_number = len(fund_name_list)\n org_id_number = 0\n for org_full_name in sql_df['org_full_name'].unique():\n org_id_number = org_id_number + 1\n org_id = 'O' + '0' * (5 - len(str(org_id_number))) + str(org_id_number)\n with con:\n cur = con.cursor()\n cur.execute('UPDATE org_info SET org_id=? WHERE org_full_name=?',\n (org_id, org_full_name))\n excel_name_list = excel_df['★机构全名'].tolist()\n for name in excel_name_list:\n if name in fund_name_list:\n con = sqlite3.connect(\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\db.sqlite3'\n )\n sql = 'SELECT * FROM org_info'\n sql_df = pd.read_sql(sql, con)\n name_dataframe = sql_df[sql_df['org_full_name'] == name]\n org_id = name_dataframe.loc[name_dataframe.last_valid_index(),\n 'org_id']\n commit_data = excel_df[excel_df['★机构全名'] == name]\n commit_data.columns = ['org_name', 'org_full_name', 'reg_code',\n 'reg_time', 'found_date', 'reg_capital', 'real_capital',\n 'region', 'profile', 'address', 'team', 'fund_num',\n 'is_qualification', 'prize', 'team_scale',\n 'investment_idea', 'master_strategy', 'remark',\n 'asset_mgt_scale', 'linkman', 'linkman_duty',\n 'linkman_phone', 'linkman_email']\n commit_data['org_id'] = str(org_id)\n org_name = str(commit_data.loc[commit_data.org_full_name ==\n name, 'org_name'].values[0])\n org_full_name = str(name)\n reg_code = str(commit_data.loc[commit_data.org_full_name ==\n name, 'reg_code'].values[0])\n reg_time = str(commit_data.loc[commit_data.org_full_name ==\n name, 'reg_time'].values[0])\n found_date = str(commit_data.loc[commit_data.org_full_name ==\n name, 'found_date'].values[0])\n reg_capital = str(commit_data.loc[commit_data.org_full_name ==\n name, 'reg_capital'].values[0])\n real_capital = str(commit_data.loc[commit_data.org_full_name ==\n name, 'real_capital'].values[0])\n region = str(commit_data.loc[commit_data.org_full_name == name,\n 'region'].values[0])\n profile = str(commit_data.loc[commit_data.org_full_name == name,\n 'profile'].values[0])\n address = str(commit_data.loc[commit_data.org_full_name == name,\n 'address'].values[0])\n team = str(commit_data.loc[commit_data.org_full_name == name,\n 'org_name'].values[0])\n fund_num = str(commit_data.loc[commit_data.org_full_name ==\n name, 'team'].values[0])\n is_qualification = str(commit_data.loc[commit_data.\n org_full_name == name, 'is_qualification'].values[0])\n prize = 
str(commit_data.loc[commit_data.org_full_name == name,\n 'prize'].values[0])\n team_scale = str(commit_data.loc[commit_data.org_full_name ==\n name, 'team_scale'])\n investment_idea = str(commit_data.loc[commit_data.org_full_name ==\n name, 'investment_idea'].values[0])\n master_strategy = str(commit_data.loc[commit_data.org_full_name ==\n name, 'master_strategy'].values[0])\n remark = str(commit_data.loc[commit_data.org_full_name == name,\n 'remark'].values[0])\n asset_mgt_scale = str(commit_data.loc[commit_data.org_full_name ==\n name, 'asset_mgt_scale'].values[0])\n linkman = str(commit_data.loc[commit_data.org_full_name == name,\n 'linkman'].values[0])\n linkman_duty = str(commit_data.loc[commit_data.org_full_name ==\n name, 'linkman_duty'].values[0])\n linkman_phone = str(commit_data.loc[commit_data.org_full_name ==\n name, 'linkman_phone'].values[0])\n linkman_email = str(commit_data.loc[commit_data.org_full_name ==\n name, 'linkman_email'].values[0])\n with con:\n cur = con.cursor()\n sql = (\n 'UPDATE org_info SET org_name=?, org_full_name=?, reg_code=?, reg_time=?, found_date=?, reg_capital=?, real_capital=?, region=?,profile=?, address=?, team=?, fund_num=?, is_qualification=?, prize=?, team_scale=?, investment_idea=?, master_strategy=?, remark=?, asset_mgt_scale=?, linkman=?, linkman_duty=?, linkman_phone=?, linkman_email=? WHERE org_id=?'\n )\n l = (org_name, org_full_name, reg_code, reg_time,\n found_date, reg_capital, real_capital, region, profile,\n address, team, fund_num, is_qualification, prize,\n team_scale, investment_idea, master_strategy, remark,\n asset_mgt_scale, linkman, linkman_duty, linkman_phone,\n linkman_email, org_id)\n cur.execute(sql, l)\n print('if')\n else:\n sql_number = sql_number + 1\n commit_data = excel_df[excel_df['★机构全名'] == name]\n commit_data.columns = ['org_name', 'org_full_name', 'reg_code',\n 'reg_time', 'found_date', 'reg_capital', 'real_capital',\n 'region', 'profile', 'address', 'team', 'fund_num',\n 'is_qualification', 'prize', 'team_scale',\n 'investment_idea', 'master_strategy', 'remark',\n 'asset_mgt_scale', 'linkman', 'linkman_duty',\n 'linkman_phone', 'linkman_email']\n commit_data.loc[:, 'org_id'] = 'O' + '0' * (5 - len(str(\n sql_number))) + str(sql_number)\n commit_data.to_sql('org_info', con, if_exists='append', index=False\n )\n print('else')\n\n\ndef df_to_sql_T_2(filefullpath, sheet, row_name):\n excel_df = pd.read_excel(filefullpath, sheetname=sheet)\n excel_df = excel_df.dropna(how='all')\n excel_df = excel_df.dropna(axis=1, how='all')\n excel_df = excel_df.T\n excel_df.columns = excel_df.loc[row_name]\n excel_df = excel_df.drop(row_name, axis=0, inplace=False)\n excel_df.index = range(len(excel_df))\n excel_df.drop_duplicates(subset=['★基金全称'], inplace=True)\n con = sqlite3.connect(\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\db.sqlite3')\n sql = 'SELECT * FROM fund_info'\n sql_df = pd.read_sql(sql, con)\n fund_name_list = sql_df['fund_full_name'].tolist()\n sql_number = len(fund_name_list)\n fund_id_number = 0\n for fund_full_name in sql_df['fund_full_name'].unique():\n fund_id_number = fund_id_number + 1\n fund_id = 'F' + '0' * (6 - len(str(fund_id_number))) + str(\n fund_id_number)\n with con:\n cur = con.cursor()\n cur.execute('UPDATE fund_info SET fund_id=? 
WHERE fund_full_name=?'\n , (fund_id, fund_full_name))\n excel_name_list = excel_df['★基金全称'].tolist()\n for name in excel_name_list:\n if name in fund_name_list:\n con = sqlite3.connect(\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\db.sqlite3'\n )\n sql = 'SELECT * FROM fund_info'\n sql_df = pd.read_sql(sql, con)\n name_dataframe = sql_df[sql_df['fund_full_name'] == name]\n fund_id = name_dataframe.loc[name_dataframe.last_valid_index(),\n 'fund_id']\n commit_data = excel_df[excel_df['★基金全称'] == name]\n commit_data.columns = ['group', 'fund_type_strategy',\n 'reg_code', 'foundation_date', 'fund_name',\n 'fund_full_name', 'fund_manager', 'fund_manager_nominal',\n 'fund_stockbroker', 'fund_custodian', 'fund_member',\n 'fund_type_issuance', 'fund_type_structure',\n 'fund_structure', 'issue_scale', 'asset_scale',\n 'is_main_fund', 'fee_pay', 'open_date', 'locked_time_limit',\n 'duration', 'fee_manage', 'fee_pay_remark', 'fee_redeem',\n 'fee_subscription', 'fee_trust', 'investment_range',\n 'min_purchase_amount', 'min_append_amount', 'stop_line',\n 'alert_line', 'manager_participation_scale',\n 'investment_idea', 'structure_hierarchy', 'remark']\n commit_data['fund_id'] = str(fund_id)\n group = str(commit_data.loc[commit_data.fund_full_name == name,\n 'group'].values[0])\n fund_type_strategy = str(commit_data.loc[commit_data.\n fund_full_name == name, 'fund_type_strategy'].values[0])\n reg_code = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'reg_code'].values[0])\n foundation_date = str(commit_data.loc[commit_data.\n fund_full_name == name, 'foundation_date'].values[0])\n fund_name = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fund_name'].values[0])\n fund_full_name = str(name)\n fund_manager = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fund_manager'].values[0])\n fund_manager_nominal = str(commit_data.loc[commit_data.\n fund_full_name == name, 'fund_manager_nominal'].values[0])\n fund_stockbroker = str(commit_data.loc[commit_data.\n fund_full_name == name, 'fund_stockbroker'].values[0])\n fund_custodian = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fund_custodian'].values[0])\n fund_member = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fund_member'].values[0])\n fund_type_issuance = str(commit_data.loc[commit_data.\n fund_full_name == name, 'fund_type_issuance'].values[0])\n fund_type_structure = str(commit_data.loc[commit_data.\n fund_full_name == name, 'fund_type_structure'].values[0])\n fund_structure = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fund_structure'].values[0])\n issue_scale = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'issue_scale'].values[0])\n asset_scale = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'asset_scale'].values[0])\n is_main_fund = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'is_main_fund'].values[0])\n fee_pay = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fee_pay'].values[0])\n open_date = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'open_date'])\n locked_time_limit = str(commit_data.loc[commit_data.\n fund_full_name == name, 'locked_time_limit'].values[0])\n duration = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'duration'].values[0])\n fee_manage = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fee_manage'].values[0])\n fee_pay_remark = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fee_pay_remark'].values[0])\n fee_redeem = 
str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fee_redeem'].values[0])\n fee_subscription = str(commit_data.loc[commit_data.\n fund_full_name == name, 'fee_subscription'].values[0])\n fee_trust = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'fee_trust'].values[0])\n investment_range = str(commit_data.loc[commit_data.\n fund_full_name == name, 'investment_range'].values[0])\n min_purchase_amount = str(commit_data.loc[commit_data.\n fund_full_name == name, 'min_purchase_amount'].values[0])\n min_append_amount = str(commit_data.loc[commit_data.\n fund_full_name == name, 'min_append_amount'].values[0])\n stop_line = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'stop_line'].values[0])\n alert_line = str(commit_data.loc[commit_data.fund_full_name ==\n name, 'alert_line'].values[0])\n manager_participation_scale = str(commit_data.loc[commit_data.\n fund_full_name == name, 'manager_participation_scale'].\n values[0])\n investment_idea = str(commit_data.loc[commit_data.\n fund_full_name == name, 'investment_idea'].values[0])\n structure_hierarchy = str(commit_data.loc[commit_data.\n fund_full_name == name, 'structure_hierarchy'].values[0])\n remark = str(commit_data.loc[commit_data.fund_full_name == name,\n 'remark'].values[0])\n with con:\n cur = con.cursor()\n sql = (\n \"UPDATE fund_info SET 'group'=?, fund_type_strategy=?, reg_code=?, foundation_date=?, fund_name=?, fund_full_name=?, fund_manager=?, fund_manager_nominal=?, fund_stockbroker=?, fund_custodian=?, fund_member=?, fund_type_issuance=?, fund_type_structure=?, fund_structure=?, issue_scale=?, asset_scale=?, is_main_fund=?, fee_pay=?, open_date=?, locked_time_limit=?, duration=?, fee_manage=?, fee_pay_remark=?, fee_redeem=?, fee_subscription=?, fee_trust=?, investment_range=?, min_purchase_amount=?, min_append_amount=?, stop_line=?, alert_line=?, manager_participation_scale=?, investment_idea=?, structure_hierarchy=?, remark=? 
WHERE fund_id=?\"\n )\n l = (group, fund_type_strategy, reg_code, foundation_date,\n fund_name, fund_full_name, fund_manager,\n fund_manager_nominal, fund_stockbroker, fund_custodian,\n fund_member, fund_type_issuance, fund_type_structure,\n fund_structure, issue_scale, asset_scale, is_main_fund,\n fee_pay, open_date, locked_time_limit, duration,\n fee_manage, fee_pay_remark, fee_redeem,\n fee_subscription, fee_trust, investment_range,\n min_purchase_amount, min_append_amount, stop_line,\n alert_line, manager_participation_scale,\n investment_idea, structure_hierarchy, remark, fund_id)\n cur.execute(sql, l)\n print('if')\n else:\n sql_number = sql_number + 1\n commit_data = excel_df[excel_df['★基金全称'] == name]\n commit_data.columns = ['group', 'fund_type_strategy',\n 'reg_code', 'foundation_date', 'fund_name',\n 'fund_full_name', 'fund_manager', 'fund_manager_nominal',\n 'fund_stockbroker', 'fund_custodian', 'fund_member',\n 'fund_type_issuance', 'fund_type_structure',\n 'fund_structure', 'issue_scale', 'asset_scale',\n 'is_main_fund', 'fee_pay', 'open_date', 'locked_time_limit',\n 'duration', 'fee_manage', 'fee_pay_remark', 'fee_redeem',\n 'fee_subscription', 'fee_trust', 'investment_range',\n 'min_purchase_amount', 'min_append_amount', 'stop_line',\n 'alert_line', 'manager_participation_scale',\n 'investment_idea', 'structure_hierarchy', 'remark']\n commit_data.loc[:, 'fund_id'] = 'F' + '0' * (6 - len(str(\n sql_number))) + str(sql_number)\n commit_data.to_sql('fund_info', con, if_exists='append', index=\n False)\n print('else')\n\n\ndef df_to_sql_T_3(filefullpath, sheet, row_name):\n excel_df = pd.read_excel(filefullpath, sheetname=sheet)\n excel_df = excel_df.dropna(how='all')\n excel_df = excel_df.dropna(axis=1, how='all')\n excel_df = excel_df.T\n excel_df.columns = excel_df.loc[row_name]\n excel_df = excel_df.drop(row_name, axis=0, inplace=False)\n excel_df.index = range(len(excel_df))\n excel_df.drop_duplicates(subset=['★姓名'], inplace=True)\n con = sqlite3.connect(\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\db.sqlite3')\n sql = 'SELECT * FROM manager_info'\n sql_df = pd.read_sql(sql, con)\n user_list = sql_df['user_name'].tolist()\n sql_number = len(user_list)\n user_id_number = 0\n for user_name in sql_df['user_name'].unique():\n user_id_number = user_id_number + 1\n user_id = 'M' + '0' * (5 - len(str(user_id_number))) + str(\n user_id_number)\n with con:\n cur = con.cursor()\n cur.execute('UPDATE manager_info SET user_id=? 
WHERE user_name=?',\n (user_id, user_name))\n excel_name_list = excel_df['★姓名'].tolist()\n for name in excel_name_list:\n if name in user_list:\n con = sqlite3.connect(\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\db.sqlite3'\n )\n sql = 'SELECT * FROM manager_info'\n sql_df = pd.read_sql(sql, con)\n name_dataframe = sql_df[sql_df['user_name'] == name]\n user_id = name_dataframe.loc[name_dataframe.last_valid_index(),\n 'user_id']\n commit_data = excel_df[excel_df['★姓名'] == name]\n commit_data.columns = ['user_name', 'sex', 'org_name',\n 'introduction', 'photo', 'entry_date', 'investment_years',\n 'education', 'duty', 'qualification', 'background',\n 'is_fund_qualification', 'is_core_member', 'resume',\n 'max_asset_mgt_scale', 'prize', 'remark']\n commit_data['user_id'] = str(user_id)\n user_name = str(name)\n sex = str(commit_data.loc[commit_data.user_name == name, 'sex']\n .values[0])\n org_name = str(commit_data.loc[commit_data.user_name == name,\n 'org_name'].values[0])\n introduction = str(commit_data.loc[commit_data.user_name ==\n name, 'introduction'].values[0])\n photo = str(commit_data.loc[commit_data.user_name == name,\n 'photo'].values[0])\n entry_date = str(commit_data.loc[commit_data.user_name == name,\n 'entry_date'].values[0])\n investment_years = str(commit_data.loc[commit_data.user_name ==\n name, 'investment_years'].values[0])\n education = str(commit_data.loc[commit_data.user_name == name,\n 'education'].values[0])\n duty = str(commit_data.loc[commit_data.user_name == name,\n 'duty'].values[0])\n qualification = str(commit_data.loc[commit_data.user_name ==\n name, 'qualification'].values[0])\n background = str(commit_data.loc[commit_data.user_name == name,\n 'background'].values[0])\n is_fund_qualification = str(commit_data.loc[commit_data.\n user_name == name, 'is_fund_qualification'].values[0])\n is_core_member = str(commit_data.loc[commit_data.user_name ==\n name, 'is_core_member'].values[0])\n resume = str(commit_data.loc[commit_data.user_name == name,\n 'resume'].values[0])\n max_asset_mgt_scale = str(commit_data.loc[commit_data.user_name ==\n name, 'max_asset_mgt_scale'].values[0])\n prize = str(commit_data.loc[commit_data.user_name == name,\n 'prize'].values[0])\n remark = str(commit_data.loc[commit_data.user_name == name,\n 'remark'].values[0])\n with con:\n cur = con.cursor()\n sql = (\n 'UPDATE manager_info SET user_name=?, sex=?, org_name=?, introduction=?, photo=?, entry_date=?, investment_years=?, education=?, duty=?, qualification=?, background=?, is_fund_qualification=?, is_core_member=?, resume=?, max_asset_mgt_scale=?, prize=?, remark=? 
WHERE user_id=?'\n )\n l = (user_name, sex, org_name, introduction, photo,\n entry_date, investment_years, education, duty,\n qualification, background, is_fund_qualification,\n is_core_member, resume, max_asset_mgt_scale, prize,\n remark, user_id)\n cur.execute(sql, l)\n print('if')\n else:\n sql_number = sql_number + 1\n commit_data = excel_df[excel_df['★姓名'] == name]\n commit_data.columns = ['user_name', 'sex', 'org_name',\n 'introduction', 'photo', 'entry_date', 'investment_years',\n 'education', 'duty', 'qualification', 'background',\n 'is_fund_qualification', 'is_core_member', 'resume',\n 'max_asset_mgt_scale', 'prize', 'remark']\n commit_data.loc[:, 'user_id'] = 'M' + '0' * (5 - len(str(\n sql_number))) + str(sql_number)\n commit_data.to_sql('manager_info', con, if_exists='append',\n index=False)\n print('else')\n\n\ndef df_to_sql_4(filefullpath, sheet, row_name):\n excel_df = pd.read_excel(filefullpath, sheetname=sheet)\n excel_df = excel_df.dropna(how='all')\n excel_df[row_name] = excel_df[row_name].ffill()\n excel_df.index = range(len(excel_df))\n print(excel_df)\n con = sqlite3.connect(\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\db.sqlite3')\n sql = 'SELECT * FROM fund_nav_data'\n sql_df = pd.read_sql(sql, con)\n name_list = sql_df['fund_name'].tolist()\n date_list = sql_df['statistic_date'].tolist()\n print('name_list')\n print(name_list)\n print('date_list')\n print(date_list)\n for fund_name in sql_df['fund_name'].unique():\n sql = 'SELECT * FROM fund_info'\n fund_info_sql_df = pd.read_sql(sql, con)\n fund_id = fund_info_sql_df.loc[fund_info_sql_df.fund_name ==\n fund_name, 'fund_id'].values[0]\n with con:\n cur = con.cursor()\n cur.execute('UPDATE fund_nav_data SET fund_id=? WHERE fund_name=?',\n (fund_id, fund_name))\n excel_name_list = excel_df['基金简称'].tolist()\n excel_name_list = list(set(excel_name_list))\n print('excel_name_list')\n print(excel_name_list)\n for name in excel_name_list:\n statistic_date_series = excel_df.loc[excel_df['基金简称'] == name, '净值日期']\n excel_date_list = statistic_date_series.tolist()\n excel_date_list = [str(i) for i in excel_date_list]\n print('excel_date_list')\n print(excel_date_list)\n for date in excel_date_list:\n if name in name_list and date in date_list:\n commit_data = excel_df[excel_df['基金简称'] == name]\n print(commit_data.columns)\n commit_data.columns = ['fund_name', 'statistic_date', 'nav',\n 'added_nav', 'total_share', 'total_asset', 'total_nav',\n 'is_split', 'is_open_date', 'split_ratio',\n 'after_tax_bonus']\n commit_data['fund_id'] = str(fund_id)\n fund_name = name\n statistic_date = str(date)\n nav = str(commit_data.loc[commit_data.statistic_date ==\n date, 'nav'].values[0])\n added_nav = str(commit_data.loc[commit_data.statistic_date ==\n date, 'added_nav'].values[0])\n total_share = str(commit_data.loc[commit_data.\n statistic_date == date, 'total_share'].values[0])\n total_asset = str(commit_data.loc[commit_data.\n statistic_date == date, 'total_asset'].values[0])\n total_nav = str(commit_data.loc[commit_data.statistic_date ==\n date, 'total_nav'].values[0])\n is_split = str(commit_data.loc[commit_data.statistic_date ==\n date, 'is_split'].values[0])\n is_open_date = str(commit_data.loc[commit_data.\n statistic_date == date, 'is_open_date'].values[0])\n split_ratio = str(commit_data.loc[commit_data.\n statistic_date == date, 'split_ratio'].values[0])\n after_tax_bonus = str(commit_data.loc[commit_data.\n statistic_date == date, 'after_tax_bonus'].values[0])\n with con:\n cur = con.cursor()\n sql = (\n 
'UPDATE fund_nav_data SET nav=?, added_nav=?, total_share=?, total_asset=?, total_nav=?, is_split=?, is_open_date=?, split_ratio=?, after_tax_bonus=? WHERE fund_name=? AND statistic_date=?'\n )\n l = (nav, added_nav, total_share, total_asset,\n total_nav, is_split, is_open_date, split_ratio,\n after_tax_bonus, fund_name, statistic_date)\n cur.execute(sql, l)\n print('if')\n else:\n commit_data = excel_df[(excel_df['基金简称'] == name) & (\n excel_df['净值日期'] == date)]\n commit_data.columns = ['fund_name', 'statistic_date', 'nav',\n 'added_nav', 'total_share', 'total_asset', 'total_nav',\n 'is_split', 'is_open_date', 'split_ratio',\n 'after_tax_bonus']\n commit_data.to_sql('fund_nav_data', con, if_exists='append',\n index=False)\n print('else')\n\n\ndef listing(request):\n context = {}\n if request.method == 'POST':\n uf = UserForm(request.POST, request.FILES)\n if request.user.username and uf.is_valid():\n user_upload_file = uf.cleaned_data['user_upload_file']\n profile = UserProfile()\n profile.username = request.user.username\n profile.user_upload_file = user_upload_file\n profile.save()\n file_name = request.FILES.get('user_upload_file').name\n path = (\n 'C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\upload\\\\upload\\\\'\n )\n filefullpath = path + file_name\n if user_upload_file:\n b = xlrd.open_workbook(filefullpath)\n for sheet in range(1, 5):\n if sheet == 1:\n row_name = '公司资料简介'\n df_to_sql_T_1(filefullpath, sheet, row_name)\n if sheet == 2:\n row_name = '基金简介'\n df_to_sql_T_2(filefullpath, sheet, row_name)\n if sheet == 3:\n row_name = '人员简介'\n df_to_sql_T_3(filefullpath, sheet, row_name)\n if sheet == 4:\n row_name = '基金简称'\n df_to_sql_4(filefullpath, sheet, row_name)\n return HttpResponse('upload ok!')\n else:\n return redirect(to='login')\n else:\n uf = UserForm()\n context['uf'] = uf\n return render(request, 'website/templates/listing.html', context)\n\n\ndef index_login(request):\n context = {}\n if request.method == 'GET':\n form = AuthenticationForm\n if request.method == 'POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n login(request, form.get_user())\n return redirect(to='list')\n context['form'] = form\n return render(request, 'register_login.html', context)\n\n\ndef index_register(request):\n context = {}\n if request.method == 'GET':\n form = UserCreationForm\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(to='login')\n context['form'] = form\n return render(request, 'register_login.html', context)\n",
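[Editor's aside — not part of the dumped code.] The sheet handlers in the steps above all begin with the same reshaping: read the sheet, drop empty rows and columns, transpose, promote a named label row to the header, reset the index, and de-duplicate on the key column. A small sketch of that preprocessing, factored into one helper, follows; the name load_transposed_sheet is mine, and sheet_name= is the current pandas spelling of the older sheetname= keyword used in the dump.

import pandas as pd

def load_transposed_sheet(path, sheet, header_row, key_col):
    # Mirrors the cleanup at the top of df_to_sql_T_1/2/3 above.
    df = pd.read_excel(path, sheet_name=sheet)
    df = df.dropna(how="all").dropna(axis=1, how="all")
    df = df.T                              # questionnaire fields become columns
    df.columns = df.loc[header_row]        # promote the label row to the header
    df = df.drop(header_row, axis=0)
    df.index = range(len(df))
    df = df.drop_duplicates(subset=[key_col])
    return df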
"step-5": "from django.shortcuts import render, Http404, HttpResponse, redirect\nfrom django.contrib.auth import authenticate, login\nfrom website.form import UserForm\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom website.models import UserProfile\nfrom website.form import UserForm\nimport pandas as pd\nfrom pandas import DataFrame\nfrom sqlalchemy import create_engine\nfrom django.contrib.auth.decorators import login_required\nimport sqlite3\nimport xlrd\nimport uuid\n\n\ndef df_to_sql_T_1(filefullpath, sheet, row_name):#路径名,sheet为sheet数,row_name为指定行为columns\n #读取存在文件夹中的excel\n excel_df = pd.read_excel(filefullpath, sheetname=sheet)\n excel_df = excel_df.dropna(how=\"all\")\n excel_df = excel_df.dropna(axis=1, how=\"all\")\n excel_df = excel_df.T\n excel_df.columns = excel_df.loc[row_name]\n excel_df = excel_df.drop(row_name, axis=0, inplace=False)\n excel_df.index = range(len(excel_df))\n excel_df.drop_duplicates(subset=['★机构全名'], inplace=True)\n\n #数据库的读取\n con = sqlite3.connect(r\"C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3\")\n sql = \"SELECT * FROM org_info\"#!!!注意sql中没有表格会出错\n sql_df = pd.read_sql(sql, con)\n fund_name_list = sql_df['org_full_name'].tolist()\n sql_number = len(fund_name_list)\n\n\n #依次对数据库中的每一行添加一列id\n org_id_number = 0\n for org_full_name in sql_df['org_full_name'].unique():\n org_id_number = org_id_number+1\n org_id = 'O'+'0'*(5-len(str(org_id_number)))+str(org_id_number)\n with con:\n cur = con.cursor()\n cur.execute(\"\"\"UPDATE org_info SET org_id=? WHERE org_full_name=?\"\"\", (org_id, org_full_name))\n\n\n #对excel进行读取\n #excel_data = pd.read_excel(filefullpath, sheetname=sheet)\n excel_name_list = excel_df['★机构全名'].tolist()\n for name in excel_name_list:\n if name in fund_name_list:\n #提取数据库中的org_full_name为name的id\n con = sqlite3.connect(r\"C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3\")\n sql = \"SELECT * FROM org_info\"\n sql_df = pd.read_sql(sql, con)\n name_dataframe =sql_df[sql_df[\"org_full_name\"] == name]\n org_id = name_dataframe.loc[name_dataframe.last_valid_index(), 'org_id']\n\n #把excel的一行变成dataframe,并且加上id,并上传到数据库\n commit_data = excel_df[excel_df[\"★机构全名\"] == name]\n commit_data.columns = [\"org_name\", \"org_full_name\", \"reg_code\", \"reg_time\", \"found_date\", \"reg_capital\",\n \"real_capital\", \"region\", \"profile\", \"address\", \"team\", \"fund_num\",\n \"is_qualification\", \"prize\", \"team_scale\", \"investment_idea\", \"master_strategy\",\n \"remark\", \"asset_mgt_scale\", \"linkman\", \"linkman_duty\", \"linkman_phone\",\n \"linkman_email\"]\n commit_data[\"org_id\"] = str(org_id)\n\n #把一行表格dataframe提取其中的值\n org_name = str(commit_data.loc[commit_data.org_full_name == name, 'org_name'].values[0])\n org_full_name = str(name)\n reg_code = str(commit_data.loc[commit_data.org_full_name == name, 'reg_code'].values[0])\n reg_time = str(commit_data.loc[commit_data.org_full_name == name, 'reg_time'].values[0])\n found_date = str(commit_data.loc[commit_data.org_full_name == name, 'found_date'].values[0])\n reg_capital = str(commit_data.loc[commit_data.org_full_name == name, 'reg_capital'].values[0])\n real_capital = str(commit_data.loc[commit_data.org_full_name == name, 'real_capital'].values[0])\n region = str(commit_data.loc[commit_data.org_full_name == name, 'region'].values[0])\n profile = str(commit_data.loc[commit_data.org_full_name == name, 'profile'].values[0])\n address = str(commit_data.loc[commit_data.org_full_name == name, 'address'].values[0])\n team = 
str(commit_data.loc[commit_data.org_full_name == name, 'org_name'].values[0])\n fund_num = str(commit_data.loc[commit_data.org_full_name == name, 'team'].values[0])\n is_qualification = str(commit_data.loc[commit_data.org_full_name == name, 'is_qualification'].values[0])\n prize = str(commit_data.loc[commit_data.org_full_name == name, 'prize'].values[0])\n team_scale = str(commit_data.loc[commit_data.org_full_name == name, 'team_scale'])\n investment_idea = str(commit_data.loc[commit_data.org_full_name == name, 'investment_idea'].values[0])\n master_strategy = str(commit_data.loc[commit_data.org_full_name == name, 'master_strategy'].values[0])\n remark = str(commit_data.loc[commit_data.org_full_name == name, 'remark'].values[0])\n asset_mgt_scale = str(commit_data.loc[commit_data.org_full_name == name, 'asset_mgt_scale'].values[0])\n linkman = str(commit_data.loc[commit_data.org_full_name == name, 'linkman'].values[0])\n linkman_duty = str(commit_data.loc[commit_data.org_full_name == name, 'linkman_duty'].values[0])\n linkman_phone = str(commit_data.loc[commit_data.org_full_name == name, 'linkman_phone'].values[0])\n linkman_email = str(commit_data.loc[commit_data.org_full_name == name, 'linkman_email'].values[0])\n # org_name = str(commit_data.loc[index.last_valid_index(), \"org_name\"])\n\n with con:\n cur = con.cursor()\n sql = \"\"\"UPDATE org_info SET org_name=?, org_full_name=?, reg_code=?, reg_time=?, found_date=?, \\\n reg_capital=?, real_capital=?, region=?,profile=?, address=?, team=?, fund_num=?, is_qualification=?, \\\n prize=?, team_scale=?, investment_idea=?, master_strategy=?, remark=?, asset_mgt_scale=?, linkman=?, \\\n linkman_duty=?, linkman_phone=?, linkman_email=? WHERE org_id=?\"\"\"\n l = (org_name, org_full_name, reg_code, reg_time, found_date, reg_capital, real_capital, region, profile,\\\n address, team, fund_num, is_qualification, prize, team_scale, investment_idea, master_strategy, remark,\\\n asset_mgt_scale, linkman, linkman_duty, linkman_phone, linkman_email, org_id)\n cur.execute(sql, l)\n print(\"if\")\n else:\n sql_number = sql_number + 1\n commit_data = excel_df[excel_df[\"★机构全名\"] == name]\n commit_data.columns = [\"org_name\", \"org_full_name\", \"reg_code\", \"reg_time\", \"found_date\", \"reg_capital\",\n \"real_capital\", \"region\", \"profile\", \"address\", \"team\", \"fund_num\",\n \"is_qualification\", \"prize\", \"team_scale\", \"investment_idea\", \"master_strategy\",\n \"remark\", \"asset_mgt_scale\", \"linkman\", \"linkman_duty\", \"linkman_phone\",\n \"linkman_email\"]\n commit_data.loc[:, \"org_id\"] = 'O'+'0'*(5-len(str(sql_number)))+str(sql_number)\n commit_data.to_sql(\"org_info\", con, if_exists=\"append\", index=False)\n print(\"else\")\n\ndef df_to_sql_T_2(filefullpath, sheet, row_name):#路径名,sheet为sheet数,row_name为指定行为columns\n #读取存在文件夹中的excel\n excel_df = pd.read_excel(filefullpath, sheetname=sheet)\n excel_df = excel_df.dropna(how=\"all\")\n excel_df = excel_df.dropna(axis=1, how=\"all\")\n excel_df = excel_df.T\n excel_df.columns = excel_df.loc[row_name]\n excel_df = excel_df.drop(row_name, axis=0, inplace=False)\n excel_df.index = range(len(excel_df))\n excel_df.drop_duplicates(subset=['★基金全称'], inplace=True)\n\n #数据库的读取\n con = sqlite3.connect(r\"C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3\")\n sql = \"SELECT * FROM fund_info\"#!!!注意sql中没有表格会出错\n sql_df = pd.read_sql(sql, con)\n fund_name_list = sql_df['fund_full_name'].tolist()#list\n sql_number = len(fund_name_list)\n\n\n #依次对数据库中的每一行添加一列id\n fund_id_number = 
0\n for fund_full_name in sql_df['fund_full_name'].unique():\n fund_id_number = fund_id_number+1\n fund_id = 'F'+'0'*(6-len(str(fund_id_number)))+str(fund_id_number)\n with con:\n cur = con.cursor()\n cur.execute(\"\"\"UPDATE fund_info SET fund_id=? WHERE fund_full_name=?\"\"\", (fund_id, fund_full_name))\n\n\n #对excel进行读取\n #excel_data = pd.read_excel(filefullpath, sheetname=sheet)\n excel_name_list = excel_df['★基金全称'].tolist()#list\n for name in excel_name_list:\n if name in fund_name_list:\n #提取数据库中的org_full_name为name的id\n con = sqlite3.connect(r\"C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3\")\n sql = \"SELECT * FROM fund_info\"\n sql_df = pd.read_sql(sql, con)\n name_dataframe =sql_df[sql_df[\"fund_full_name\"] == name]\n fund_id = name_dataframe.loc[name_dataframe.last_valid_index(), 'fund_id']\n\n #把excel的一行变成dataframe,并且加上id,并上传到数据库\n commit_data = excel_df[excel_df[\"★基金全称\"] == name]\n commit_data.columns = [\"group\", \"fund_type_strategy\", \"reg_code\", \"foundation_date\", \"fund_name\",\n \"fund_full_name\", \"fund_manager\", \"fund_manager_nominal\", \"fund_stockbroker\",\n \"fund_custodian\", \"fund_member\", \"fund_type_issuance\", \"fund_type_structure\",\n \"fund_structure\", \"issue_scale\", \"asset_scale\", \"is_main_fund\", \"fee_pay\",\n \"open_date\", \"locked_time_limit\", \"duration\", \"fee_manage\", \"fee_pay_remark\",\n \"fee_redeem\", \"fee_subscription\", \"fee_trust\", \"investment_range\",\n \"min_purchase_amount\", \"min_append_amount\", \"stop_line\", \"alert_line\",\n \"manager_participation_scale\", \"investment_idea\", \"structure_hierarchy\", \"remark\"]\n commit_data[\"fund_id\"] = str(fund_id)\n\n #把一行表格dataframe提取其中的值\n group = str(commit_data.loc[commit_data.fund_full_name == name, 'group'].values[0])\n fund_type_strategy = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_type_strategy'].values[0])\n reg_code = str(commit_data.loc[commit_data.fund_full_name == name, 'reg_code'].values[0])\n foundation_date = str(commit_data.loc[commit_data.fund_full_name == name, 'foundation_date'].values[0])\n fund_name = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_name'].values[0])\n fund_full_name = str(name)\n fund_manager = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_manager'].values[0])\n fund_manager_nominal = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_manager_nominal'].values[0])\n fund_stockbroker = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_stockbroker'].values[0])\n fund_custodian = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_custodian'].values[0])\n fund_member = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_member'].values[0])\n fund_type_issuance = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_type_issuance'].values[0])\n fund_type_structure = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_type_structure'].values[0])\n fund_structure = str(commit_data.loc[commit_data.fund_full_name == name, 'fund_structure'].values[0])\n issue_scale = str(commit_data.loc[commit_data.fund_full_name == name, 'issue_scale'].values[0])\n asset_scale = str(commit_data.loc[commit_data.fund_full_name == name, 'asset_scale'].values[0])\n is_main_fund = str(commit_data.loc[commit_data.fund_full_name == name, 'is_main_fund'].values[0])\n fee_pay = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_pay'].values[0])\n open_date = str(commit_data.loc[commit_data.fund_full_name == name, 'open_date'])\n 
locked_time_limit = str(commit_data.loc[commit_data.fund_full_name == name, 'locked_time_limit'].values[0])\n duration = str(commit_data.loc[commit_data.fund_full_name == name, 'duration'].values[0])\n fee_manage = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_manage'].values[0])\n fee_pay_remark = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_pay_remark'].values[0])\n fee_redeem = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_redeem'].values[0])\n fee_subscription = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_subscription'].values[0])\n fee_trust = str(commit_data.loc[commit_data.fund_full_name == name, 'fee_trust'].values[0])\n investment_range = str(commit_data.loc[commit_data.fund_full_name == name, 'investment_range'].values[0])\n min_purchase_amount = str(commit_data.loc[commit_data.fund_full_name == name, 'min_purchase_amount'].values[0])\n min_append_amount = str(commit_data.loc[commit_data.fund_full_name == name, 'min_append_amount'].values[0])\n stop_line = str(commit_data.loc[commit_data.fund_full_name == name, 'stop_line'].values[0])\n alert_line = str(commit_data.loc[commit_data.fund_full_name == name, 'alert_line'].values[0])\n manager_participation_scale = str(commit_data.loc[commit_data.fund_full_name == name, 'manager_participation_scale'].values[0])\n investment_idea = str(commit_data.loc[commit_data.fund_full_name == name, 'investment_idea'].values[0])\n structure_hierarchy = str(commit_data.loc[commit_data.fund_full_name == name, 'structure_hierarchy'].values[0])\n remark = str(commit_data.loc[commit_data.fund_full_name == name, 'remark'].values[0])\n\n with con:\n cur = con.cursor()\n sql = \"\"\"UPDATE fund_info SET 'group'=?, fund_type_strategy=?, reg_code=?, foundation_date=?, fund_name=?,\\\n fund_full_name=?, fund_manager=?, fund_manager_nominal=?, fund_stockbroker=?, fund_custodian=?, fund_member=?,\\\n fund_type_issuance=?, fund_type_structure=?, fund_structure=?, issue_scale=?, asset_scale=?, is_main_fund=?, fee_pay=?,\\\n open_date=?, locked_time_limit=?, duration=?, fee_manage=?, fee_pay_remark=?, fee_redeem=?, fee_subscription=?, fee_trust=?,\\\n investment_range=?, min_purchase_amount=?, min_append_amount=?, stop_line=?, alert_line=?, manager_participation_scale=?, \\\n investment_idea=?, structure_hierarchy=?, remark=? 
WHERE fund_id=?\"\"\"\n l = (group, fund_type_strategy, reg_code, foundation_date, fund_name, fund_full_name, fund_manager, \\\n fund_manager_nominal, fund_stockbroker, fund_custodian, fund_member, fund_type_issuance, \\\n fund_type_structure, fund_structure, issue_scale, asset_scale, is_main_fund, fee_pay, open_date, \\\n locked_time_limit, duration, fee_manage, fee_pay_remark, fee_redeem, fee_subscription, fee_trust, \\\n investment_range, min_purchase_amount, min_append_amount, stop_line, alert_line, manager_participation_scale, \\\n investment_idea, structure_hierarchy, remark, fund_id)\n cur.execute(sql, l)\n print(\"if\")\n else:\n sql_number = sql_number + 1\n commit_data = excel_df[excel_df[\"★基金全称\"] == name]\n commit_data.columns = [\"group\", \"fund_type_strategy\", \"reg_code\", \"foundation_date\", \"fund_name\", \"fund_full_name\", \\\n \"fund_manager\", \"fund_manager_nominal\", \"fund_stockbroker\", \"fund_custodian\", \"fund_member\", \\\n \"fund_type_issuance\", \"fund_type_structure\", \"fund_structure\", \"issue_scale\", \"asset_scale\", \\\n \"is_main_fund\", \"fee_pay\", \"open_date\", \"locked_time_limit\", \"duration\", \"fee_manage\", \\\n \"fee_pay_remark\", \"fee_redeem\", \"fee_subscription\", \"fee_trust\", \"investment_range\", \\\n \"min_purchase_amount\", \"min_append_amount\", \"stop_line\", \"alert_line\", \"manager_participation_scale\", \\\n \"investment_idea\", \"structure_hierarchy\", \"remark\"]\n commit_data.loc[:, \"fund_id\"] = 'F'+'0'*(6-len(str(sql_number)))+str(sql_number)\n commit_data.to_sql(\"fund_info\", con, if_exists=\"append\", index=False)\n print(\"else\")\n\ndef df_to_sql_T_3(filefullpath, sheet, row_name):#路径名,sheet为sheet数,row_name为指定行为columns\n #读取存在文件夹中的excel\n excel_df = pd.read_excel(filefullpath, sheetname=sheet)\n excel_df = excel_df.dropna(how=\"all\")\n excel_df = excel_df.dropna(axis=1, how=\"all\")\n excel_df = excel_df.T\n excel_df.columns = excel_df.loc[row_name]#把【人员简介】的这一行变成columns这一列\n excel_df = excel_df.drop(row_name, axis=0, inplace=False)#去除【人员简介】这一行\n excel_df.index = range(len(excel_df))\n excel_df.drop_duplicates(subset=['★姓名'], inplace=True)\n\n #数据库的读取\n con = sqlite3.connect(r\"C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3\")\n sql = \"SELECT * FROM manager_info\"#!!!注意sql中没有表格会出错\n sql_df = pd.read_sql(sql, con)\n user_list = sql_df['user_name'].tolist()#list\n sql_number = len(user_list)\n\n\n #依次对数据库中的每一行添加一列id\n user_id_number = 0\n for user_name in sql_df['user_name'].unique():\n user_id_number = user_id_number+1\n user_id = 'M'+'0'*(5-len(str(user_id_number)))+str(user_id_number)\n with con:\n cur = con.cursor()\n cur.execute(\"\"\"UPDATE manager_info SET user_id=? 
WHERE user_name=?\"\"\", (user_id, user_name))\n\n\n #对excel进行读取\n #excel_data = pd.read_excel(filefullpath, sheetname=sheet)\n excel_name_list = excel_df['★姓名'].tolist()#list\n for name in excel_name_list:\n if name in user_list:\n #提取数据库中的user_name为name的id\n con = sqlite3.connect(r\"C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3\")\n sql = \"SELECT * FROM manager_info\"\n sql_df = pd.read_sql(sql, con)\n name_dataframe =sql_df[sql_df[\"user_name\"] == name]\n user_id = name_dataframe.loc[name_dataframe.last_valid_index(), 'user_id']#loc到最后一个有效的index和fund_id,取出值\n\n #把excel的一行变成dataframe,并且加上id,并上传到数据库\n commit_data = excel_df[excel_df[\"★姓名\"] == name]\n commit_data.columns = [\"user_name\", \"sex\", \"org_name\", \"introduction\", \"photo\", \"entry_date\",\n \"investment_years\", \"education\", \"duty\", \"qualification\", \"background\", \"is_fund_qualification\",\n \"is_core_member\", \"resume\", \"max_asset_mgt_scale\", \"prize\", \"remark\"]\n commit_data[\"user_id\"] = str(user_id)#不需要\n\n #把一行表格dataframe提取其中的值\n user_name = str(name)\n sex = str(commit_data.loc[commit_data.user_name == name, 'sex'].values[0])\n org_name = str(commit_data.loc[commit_data.user_name == name, 'org_name'].values[0])\n introduction = str(commit_data.loc[commit_data.user_name == name, 'introduction'].values[0])\n photo = str(commit_data.loc[commit_data.user_name == name, 'photo'].values[0])\n entry_date = str(commit_data.loc[commit_data.user_name == name, 'entry_date'].values[0])\n investment_years = str(commit_data.loc[commit_data.user_name == name, 'investment_years'].values[0])\n education = str(commit_data.loc[commit_data.user_name == name, 'education'].values[0])\n duty = str(commit_data.loc[commit_data.user_name == name, 'duty'].values[0])\n qualification = str(commit_data.loc[commit_data.user_name == name, 'qualification'].values[0])\n background = str(commit_data.loc[commit_data.user_name == name, 'background'].values[0])\n is_fund_qualification = str(commit_data.loc[commit_data.user_name == name, 'is_fund_qualification'].values[0])\n is_core_member = str(commit_data.loc[commit_data.user_name == name, 'is_core_member'].values[0])\n resume = str(commit_data.loc[commit_data.user_name == name, 'resume'].values[0])\n max_asset_mgt_scale = str(commit_data.loc[commit_data.user_name == name, 'max_asset_mgt_scale'].values[0])\n prize = str(commit_data.loc[commit_data.user_name == name, 'prize'].values[0])\n remark = str(commit_data.loc[commit_data.user_name == name, 'remark'].values[0])\n\n with con:\n cur = con.cursor()\n sql = \"\"\"UPDATE manager_info SET user_name=?, sex=?, org_name=?, introduction=?, photo=?, \\\n entry_date=?, investment_years=?, education=?, duty=?, qualification=?, background=?, is_fund_qualification=?, \\\n is_core_member=?, resume=?, max_asset_mgt_scale=?, prize=?, remark=? 
WHERE user_id=?\"\"\"\n l = (user_name, sex, org_name, introduction, photo, entry_date, investment_years, education, \\\n duty, qualification, background, is_fund_qualification, is_core_member, resume, max_asset_mgt_scale, \\\n prize, remark, user_id)\n cur.execute(sql, l)\n print(\"if\")\n else:\n sql_number = sql_number + 1\n commit_data = excel_df[excel_df[\"★姓名\"] == name]\n commit_data.columns = [\"user_name\", \"sex\", \"org_name\", \"introduction\", \"photo\", \"entry_date\", \\\n \"investment_years\", \"education\", \"duty\", \"qualification\", \"background\", \\\n \"is_fund_qualification\", \"is_core_member\", \"resume\", \"max_asset_mgt_scale\", \"prize\", \\\n \"remark\"]\n commit_data.loc[:, \"user_id\"] = 'M'+'0'*(5-len(str(sql_number)))+str(sql_number)\n commit_data.to_sql(\"manager_info\", con, if_exists=\"append\", index=False)\n print(\"else\")\n\ndef df_to_sql_4(filefullpath, sheet, row_name):\n #读取处理文件夹中的excel\n excel_df = pd.read_excel(filefullpath, sheetname=sheet)\n excel_df = excel_df.dropna(how=\"all\")\n #excel_df = excel_df.dropna(axis=1, how=\"all\")\n excel_df[row_name] = excel_df[row_name].ffill()\n excel_df.index = range(len(excel_df))\n print(excel_df)\n\n #数据库的读取\n con = sqlite3.connect(r\"C:\\Users\\K\\Desktop\\excel-upload-sqlite3\\mins\\db.sqlite3\")\n sql = \"SELECT * FROM fund_nav_data\"\n sql_df = pd.read_sql(sql, con)\n name_list = sql_df['fund_name'].tolist()\n date_list = sql_df['statistic_date'].tolist()\n print(\"name_list\")\n #print(type(name_list[0]))\n print(name_list)\n print(\"date_list\")\n #print(type(date_list[0]))\n print(date_list)\n\n #从fund_info数据表中提取出fund_id,加入fund_nav_data数据表中的fund_id\n for fund_name in sql_df['fund_name'].unique():\n sql = \"SELECT * FROM fund_info\"\n fund_info_sql_df = pd.read_sql(sql, con)\n fund_id = fund_info_sql_df.loc[fund_info_sql_df.fund_name == fund_name, 'fund_id'].values[0]\n with con:\n cur = con.cursor()\n cur.execute(\"\"\"UPDATE fund_nav_data SET fund_id=? 
WHERE fund_name=?\"\"\", (fund_id, fund_name))\n\n #对excel_df进行读取\n excel_name_list = excel_df['基金简称'].tolist()\n excel_name_list = list(set(excel_name_list))\n print(\"excel_name_list\")\n #print(type(excel_name_list[0]))\n print(excel_name_list)\n\n for name in excel_name_list:\n statistic_date_series = excel_df.loc[excel_df['基金简称'] == name, '净值日期']\n excel_date_list = statistic_date_series.tolist()\n excel_date_list = [str(i) for i in excel_date_list]\n print(\"excel_date_list\")\n #print(type(excel_date_list[0]))\n print(excel_date_list)\n for date in excel_date_list:\n if name in name_list and date in date_list:\n commit_data = excel_df[excel_df['基金简称'] == name]\n print(commit_data.columns)\n commit_data.columns = [\"fund_name\", \"statistic_date\", \"nav\", \"added_nav\", \"total_share\", \"total_asset\", \"total_nav\", \"is_split\", \"is_open_date\", \"split_ratio\", \"after_tax_bonus\"]\n commit_data[\"fund_id\"] = str(fund_id)\n\n fund_name = name\n statistic_date = str(date)\n nav = str(commit_data.loc[commit_data.statistic_date == date, 'nav'].values[0])\n added_nav = str(commit_data.loc[commit_data.statistic_date == date, 'added_nav'].values[0])\n total_share = str(commit_data.loc[commit_data.statistic_date == date, 'total_share'].values[0])\n total_asset = str(commit_data.loc[commit_data.statistic_date == date, 'total_asset'].values[0])\n total_nav = str(commit_data.loc[commit_data.statistic_date == date, 'total_nav'].values[0])\n is_split = str(commit_data.loc[commit_data.statistic_date == date, 'is_split'].values[0])\n is_open_date = str(commit_data.loc[commit_data.statistic_date == date, 'is_open_date'].values[0])\n split_ratio = str(commit_data.loc[commit_data.statistic_date == date, 'split_ratio'].values[0])\n after_tax_bonus = str(commit_data.loc[commit_data.statistic_date == date, 'after_tax_bonus'].values[0])\n\n with con:\n cur = con.cursor()\n sql = \"\"\"UPDATE fund_nav_data SET nav=?, added_nav=?, total_share=?, total_asset=?, total_nav=?, is_split=?, is_open_date=?, split_ratio=?, after_tax_bonus=? WHERE fund_name=? 
AND statistic_date=?\"\"\"\n l = (nav, added_nav, total_share, total_asset, total_nav, is_split, is_open_date, split_ratio, after_tax_bonus, fund_name, statistic_date)\n cur.execute(sql, l)\n print(\"if\")\n else:\n commit_data = excel_df[(excel_df[\"基金简称\"] == name)&(excel_df[\"净值日期\"] == date)]\n commit_data.columns = [\"fund_name\", \"statistic_date\", \"nav\", \"added_nav\", \"total_share\", \"total_asset\", \"total_nav\", \"is_split\", \"is_open_date\", \"split_ratio\", \"after_tax_bonus\"]\n commit_data.to_sql(\"fund_nav_data\", con, if_exists=\"append\", index=False)\n print(\"else\")\n\ndef listing(request):\n context = {}\n if request.method == \"POST\":\n uf = UserForm(request.POST, request.FILES)\n if request.user.username and uf.is_valid():\n #username = uf.cleaned_data['username']\n user_upload_file = uf.cleaned_data['user_upload_file']\n #写入数据库\n profile = UserProfile()\n profile.username = request.user.username\n profile.user_upload_file = user_upload_file\n profile.save()\n file_name = request.FILES.get('user_upload_file').name\n path = \"C:\\\\Users\\\\K\\\\Desktop\\\\excel-upload-sqlite3\\\\mins\\\\upload\\\\upload\\\\\"\n #C:\\Users\\K\\Desktop\\excel - upload - sqlite3\\excel - upload - sqlite3\\mins\\upload\\upload\\华泰大赛参赛私募基金数据填报模板.xlsx\n filefullpath = path + file_name\n #print(filefullpath)\n if user_upload_file:\n b = xlrd.open_workbook(filefullpath)\n #count = len(b.sheets())#不需要,sheet数都是固定的\n for sheet in range(1, 5):\n if sheet == 1:\n row_name = \"公司资料简介\"\n df_to_sql_T_1(filefullpath, sheet, row_name)\n if sheet == 2:\n row_name = \"基金简介\"\n df_to_sql_T_2(filefullpath, sheet, row_name)\n if sheet == 3:\n row_name = \"人员简介\"\n df_to_sql_T_3(filefullpath, sheet, row_name)\n if sheet == 4:\n row_name = \"基金简称\"\n df_to_sql_4(filefullpath, sheet, row_name)\n return HttpResponse('upload ok!')\n else:\n return redirect(to='login')\n else:\n uf = UserForm()\n context['uf'] = uf\n return render(request, 'website/templates/listing.html', context)\n\ndef index_login(request):\n context = {}\n if request.method == \"GET\":\n form = AuthenticationForm\n if request.method == \"POST\":\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n login(request, form.get_user())\n return redirect(to='list')\n context['form'] = form\n return render(request, 'register_login.html', context)\n\ndef index_register(request):\n context = {}\n if request.method == 'GET':\n form = UserCreationForm\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(to='login')\n context['form'] = form\n return render(request, 'register_login.html', context)",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='RBM', version='0.0.1', description=
'Restricted Boltzmann Machines', long_description='README',
install_requires=['numpy', 'pandas'])
<|reserved_special_token_1|>
from distutils.core import setup
setup(name='RBM', version='0.0.1', description=
'Restricted Boltzmann Machines', long_description='README',
install_requires=['numpy', 'pandas'])
<|reserved_special_token_1|>
#!/usr/bin/env python
from distutils.core import setup
setup(
name='RBM',
version='0.0.1',
description='Restricted Boltzmann Machines',
long_description='README',
install_requires=['numpy','pandas'],
)
|
flexible
|
{
"blob_id": "fab7ee8a7336ba2c044adce4cc8483af78b775ba",
"index": 1827,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='RBM', version='0.0.1', description=\n 'Restricted Boltzmann Machines', long_description='README',\n install_requires=['numpy', 'pandas'])\n",
"step-3": "from distutils.core import setup\nsetup(name='RBM', version='0.0.1', description=\n 'Restricted Boltzmann Machines', long_description='README',\n install_requires=['numpy', 'pandas'])\n",
"step-4": "#!/usr/bin/env python\n\nfrom distutils.core import setup\n\nsetup(\n name='RBM',\n version='0.0.1',\n description='Restricted Boltzmann Machines',\n long_description='README',\n install_requires=['numpy','pandas'],\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#coding=utf-8
import unittest,time,os
from time import sleep
from appium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from HTMLTestRunner import HTMLTestRunner
from appium.webdriver.common.touch_action import TouchAction
from pub_Student import login,logout
# Returns abs path relative to this file and not cwd
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
class TestStudent(unittest.TestCase):
def setUp(self):
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '7.0'
desired_caps['automationName'] = 'UIAutomator2'
desired_caps['deviceName'] = 'PRA-AL00'
#desired_caps['udid'] = 'HMKNW17225011700'
desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')
desired_caps['appPackage'] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'
desired_caps['unicodeKeyboard'] = True
desired_caps['resetKeyboard'] = True
desired_caps['fullReset'] = True
self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
sleep(3)
def tearDown(self):
# end the session
self.driver.quit()
def changePwd(self):
driver=self.driver
sleep(2)
now=time.strftime('%Y-%m-%d %H_%M_%S')
print('\n021:修改密码----开始:'+now)
login(self)
sleep(2)
driver.find_element_by_android_uiautomator('new UiSelector().text("个人中心")').click()
sleep(3)
driver.find_element_by_android_uiautomator('new UiSelector().text("修改密码")').click()
sleep(2)
old=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')
old.click()
old.set_value('123456')
sleep(1)
new=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')
new.click()
new.set_value('123456wxl')
sleep(1)
#com.android.gallery3d:id/head_select_right
again=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')
again.click()
again.set_value('123456wxl')
sleep(1)
driver.find_element_by_android_uiautomator('new UiSelector().text("确认")').click()
sleep(3)
driver.swipe(1000,1600,1000,1250,1000)
sleep(2)
driver.find_element_by_android_uiautomator('new UiSelector().text("退出登录")').click()
sleep(2)
driver.find_element_by_android_uiautomator('new UiSelector().text("确定")').click()
sleep(2)
user=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')
user.click()
user.set_value('13923121234')
sleep(1)
pwd=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')
pwd.click()
pwd.set_value('123456wxl')
sleep(1)
driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()
sleep(4)
now=time.strftime('%Y-%m-%d %H_%M_%S')
sf0='./'+now+'_021b_relogin_R.png'
driver.get_screenshot_as_file(sf0)
sleep(3)
driver.find_element_by_android_uiautomator('new UiSelector().text("个人中心")').click()
sleep(3)
driver.swipe(1000,1600,1000,1250,1000)
sleep(2)
driver.find_element_by_android_uiautomator('new UiSelector().text("退出登录")').click()
sleep(2)
driver.find_element_by_android_uiautomator('new UiSelector().text("确定")').click()
sleep(2)
now=time.strftime('%Y-%m-%d %H_%M_%S')
print('\n021:修改密码----结束:'+now)
def changePwdBack(self):
driver=self.driver
sleep(2)
now=time.strftime('%Y-%m-%d %H_%M_%S')
print('\n021:重置密码----开始:'+now)
driver.find_element_by_android_uiautomator('new UiSelector().text("登录")').click()
sleep(2)
user=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')
user.click()
user.set_value('13923121234')
sleep(1)
pwd=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')
pwd.click()
pwd.set_value('123456wxl')
sleep(1)
driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()
sleep(3)
driver.find_element_by_android_uiautomator('new UiSelector().text("始终允许")').click()
sleep(2)
driver.find_element_by_android_uiautomator('new UiSelector().text("始终允许")').click()
sleep(2)
#test now
driver.find_element_by_android_uiautomator('new UiSelector().text("开始测试")').click()
sleep(2)
driver.find_element_by_android_uiautomator('new UiSelector().text("点击开始录音")').click()
sleep(4)
driver.find_element_by_android_uiautomator('new UiSelector().text("停止录音")').click()
sleep(3)
driver.find_element_by_android_uiautomator('new UiSelector().text("有听到声音")').click()
sleep(3)
driver.find_element_by_android_uiautomator('new UiSelector().text("下一步")').click()
sleep(3)
driver.find_element_by_android_uiautomator('new UiSelector().text("下一步")').click()
sleep(3)
driver.find_element_by_android_uiautomator('new UiSelector().text("您已完成测试")').click()
sleep(3)
driver.find_element_by_android_uiautomator('new UiSelector().text("个人中心")').click()
sleep(3)
driver.find_element_by_android_uiautomator('new UiSelector().text("修改密码")').click()
sleep(2)
old=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')
old.click()
old.set_value('123456wxl')
sleep(1)
new=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')
new.click()
new.set_value('123456')
sleep(1)
#com.android.gallery3d:id/head_select_right
again=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')
again.click()
again.set_value('123456')
sleep(1)
driver.find_element_by_android_uiautomator('new UiSelector().text("确认")').click()
sleep(3)
driver.swipe(1000,1600,1000,1250,1000)
sleep(2)
driver.find_element_by_android_uiautomator('new UiSelector().text("退出登录")').click()
sleep(2)
driver.find_element_by_android_uiautomator('new UiSelector().text("确定")').click()
sleep(2)
now=time.strftime('%Y-%m-%d %H_%M_%S')
print('\n021:重置密码----结束:'+now)
if __name__ == '__main__':
testunit=unittest.TestSuite()
#testunit.addTest(TestStudent('changePwd'))
testunit.addTest(TestStudent('changePwdBack'))
now=time.strftime('%Y-%m-%d %H_%M_%S')
filename='./'+now+'_021b_result_R.html'
fp=open(filename,'wb')
runner=HTMLTestRunner(stream=fp,title='测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',
description='自动化测试脚本运行状态:')
runner.run(testunit)
fp.close()
|
normal
|
{
"blob_id": "8d7697a0e49dc9e966b9657171c66ccda57279d6",
"index": 1930,
"step-1": "<mask token>\n\n\nclass TestStudent(unittest.TestCase):\n\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '7.0'\n desired_caps['automationName'] = 'UIAutomator2'\n desired_caps['deviceName'] = 'PRA-AL00'\n desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')\n desired_caps['appPackage'\n ] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'\n desired_caps['unicodeKeyboard'] = True\n desired_caps['resetKeyboard'] = True\n desired_caps['fullReset'] = True\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',\n desired_caps)\n sleep(3)\n\n def tearDown(self):\n self.driver.quit()\n\n def changePwd(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----开始:' + now)\n login(self)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456wxl')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(4)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n sf0 = './' + now + '_021b_relogin_R.png'\n driver.get_screenshot_as_file(sf0)\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----结束:' + now)\n\n def changePwdBack(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----开始:' + now)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"登录\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new 
UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"开始测试\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"点击开始录音\")').click()\n sleep(4)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"停止录音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"有听到声音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"您已完成测试\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456wxl')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----结束:' + now)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestStudent(unittest.TestCase):\n\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '7.0'\n desired_caps['automationName'] = 'UIAutomator2'\n desired_caps['deviceName'] = 'PRA-AL00'\n desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')\n desired_caps['appPackage'\n ] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'\n desired_caps['unicodeKeyboard'] = True\n desired_caps['resetKeyboard'] = True\n desired_caps['fullReset'] = True\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',\n desired_caps)\n sleep(3)\n\n def tearDown(self):\n self.driver.quit()\n\n def changePwd(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----开始:' + now)\n login(self)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456wxl')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(4)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n sf0 = './' + now + '_021b_relogin_R.png'\n driver.get_screenshot_as_file(sf0)\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----结束:' + now)\n\n def changePwdBack(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----开始:' + now)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"登录\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new 
UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"开始测试\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"点击开始录音\")').click()\n sleep(4)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"停止录音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"有听到声音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"您已完成测试\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456wxl')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----结束:' + now)\n\n\nif __name__ == '__main__':\n testunit = unittest.TestSuite()\n testunit.addTest(TestStudent('changePwdBack'))\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n filename = './' + now + '_021b_result_R.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner(stream=fp, title=\n '测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',\n description='自动化测试脚本运行状态:')\n runner.run(testunit)\n fp.close()\n",
"step-3": "<mask token>\nPATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))\n\n\nclass TestStudent(unittest.TestCase):\n\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '7.0'\n desired_caps['automationName'] = 'UIAutomator2'\n desired_caps['deviceName'] = 'PRA-AL00'\n desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')\n desired_caps['appPackage'\n ] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'\n desired_caps['unicodeKeyboard'] = True\n desired_caps['resetKeyboard'] = True\n desired_caps['fullReset'] = True\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',\n desired_caps)\n sleep(3)\n\n def tearDown(self):\n self.driver.quit()\n\n def changePwd(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----开始:' + now)\n login(self)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456wxl')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(4)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n sf0 = './' + now + '_021b_relogin_R.png'\n driver.get_screenshot_as_file(sf0)\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----结束:' + now)\n\n def changePwdBack(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----开始:' + now)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"登录\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 
'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"开始测试\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"点击开始录音\")').click()\n sleep(4)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"停止录音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"有听到声音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"您已完成测试\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456wxl')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----结束:' + now)\n\n\nif __name__ == '__main__':\n testunit = unittest.TestSuite()\n testunit.addTest(TestStudent('changePwdBack'))\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n filename = './' + now + '_021b_result_R.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner(stream=fp, title=\n '测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',\n description='自动化测试脚本运行状态:')\n runner.run(testunit)\n fp.close()\n",
"step-4": "import unittest, time, os\nfrom time import sleep\nfrom appium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom HTMLTestRunner import HTMLTestRunner\nfrom appium.webdriver.common.touch_action import TouchAction\nfrom pub_Student import login, logout\nPATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))\n\n\nclass TestStudent(unittest.TestCase):\n\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '7.0'\n desired_caps['automationName'] = 'UIAutomator2'\n desired_caps['deviceName'] = 'PRA-AL00'\n desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')\n desired_caps['appPackage'\n ] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'\n desired_caps['unicodeKeyboard'] = True\n desired_caps['resetKeyboard'] = True\n desired_caps['fullReset'] = True\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',\n desired_caps)\n sleep(3)\n\n def tearDown(self):\n self.driver.quit()\n\n def changePwd(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----开始:' + now)\n login(self)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456wxl')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(4)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n sf0 = './' + now + '_021b_relogin_R.png'\n driver.get_screenshot_as_file(sf0)\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----结束:' + now)\n\n def changePwdBack(self):\n driver = self.driver\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----开始:' + now)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"登录\")').click()\n sleep(2)\n user = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n 
user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"开始测试\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"点击开始录音\")').click()\n sleep(4)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"停止录音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"有听到声音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"您已完成测试\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456wxl')\n sleep(1)\n new = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456')\n sleep(1)\n again = driver.find_element_by_id(\n 'com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456')\n sleep(1)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000, 1600, 1000, 1250, 1000)\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator(\n 'new UiSelector().text(\"确定\")').click()\n sleep(2)\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----结束:' + now)\n\n\nif __name__ == '__main__':\n testunit = unittest.TestSuite()\n testunit.addTest(TestStudent('changePwdBack'))\n now = time.strftime('%Y-%m-%d %H_%M_%S')\n filename = './' + now + '_021b_result_R.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner(stream=fp, title=\n '测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',\n description='自动化测试脚本运行状态:')\n runner.run(testunit)\n fp.close()\n",
"step-5": "#coding=utf-8\nimport unittest,time,os\nfrom time import sleep\nfrom appium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom HTMLTestRunner import HTMLTestRunner\nfrom appium.webdriver.common.touch_action import TouchAction\nfrom pub_Student import login,logout\n\n# Returns abs path relative to this file and not cwd\nPATH = lambda p: os.path.abspath(\n os.path.join(os.path.dirname(__file__), p)\n)\n\n\nclass TestStudent(unittest.TestCase):\n\n def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '7.0'\n desired_caps['automationName'] = 'UIAutomator2'\n desired_caps['deviceName'] = 'PRA-AL00'\n #desired_caps['udid'] = 'HMKNW17225011700'\n desired_caps['app'] = PATH('../VIPStudent_2.0.4.apk')\n desired_caps['appPackage'] = 'com.pnlyy.pnlclass.pnlclass_student.ceshi'\n desired_caps['unicodeKeyboard'] = True\n desired_caps['resetKeyboard'] = True\n desired_caps['fullReset'] = True\n\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\n sleep(3)\n\n def tearDown(self):\n # end the session\n self.driver.quit()\n \n def changePwd(self):\n driver=self.driver\n sleep(2)\n now=time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----开始:'+now)\n login(self)\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456')\n sleep(1)\n new=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456wxl')\n sleep(1)\n #com.android.gallery3d:id/head_select_right\n again=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000,1600,1000,1250,1000)\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"确定\")').click()\n sleep(2)\n user=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(4)\n now=time.strftime('%Y-%m-%d %H_%M_%S')\n sf0='./'+now+'_021b_relogin_R.png'\n driver.get_screenshot_as_file(sf0)\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.swipe(1000,1600,1000,1250,1000)\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"确定\")').click()\n sleep(2)\n now=time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:修改密码----结束:'+now)\n\n def changePwdBack(self):\n driver=self.driver\n sleep(2)\n now=time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----开始:'+now)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"登录\")').click()\n sleep(2)\n 
user=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etUserName')\n user.click()\n user.set_value('13923121234')\n sleep(1)\n pwd=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etPassword')\n pwd.click()\n pwd.set_value('123456wxl')\n sleep(1)\n driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/btnLogin').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"始终允许\")').click()\n sleep(2)\n #test now\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"开始测试\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"点击开始录音\")').click()\n sleep(4)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"停止录音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"有听到声音\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"下一步\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"您已完成测试\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"个人中心\")').click()\n sleep(3)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"修改密码\")').click()\n sleep(2)\n old=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etOldPass')\n old.click()\n old.set_value('123456wxl')\n sleep(1)\n new=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etNewPass')\n new.click()\n new.set_value('123456')\n sleep(1)\n #com.android.gallery3d:id/head_select_right\n again=driver.find_element_by_id('com.pnlyy.pnlclass.pnlclass_student.ceshi:id/etConfirmNewPass')\n again.click()\n again.set_value('123456')\n sleep(1)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"确认\")').click()\n sleep(3)\n driver.swipe(1000,1600,1000,1250,1000)\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"退出登录\")').click()\n sleep(2)\n driver.find_element_by_android_uiautomator('new UiSelector().text(\"确定\")').click()\n sleep(2)\n now=time.strftime('%Y-%m-%d %H_%M_%S')\n print('\\n021:重置密码----结束:'+now)\n\nif __name__ == '__main__':\n testunit=unittest.TestSuite()\n #testunit.addTest(TestStudent('changePwd'))\n testunit.addTest(TestStudent('changePwdBack'))\n now=time.strftime('%Y-%m-%d %H_%M_%S')\n filename='./'+now+'_021b_result_R.html'\n fp=open(filename,'wb')\n runner=HTMLTestRunner(stream=fp,title='测试学生版android7.0真机(Honor8Lite)[修改密码/重置密码]测试报告by Appium',\n description='自动化测试脚本运行状态:')\n runner.run(testunit)\n fp.close()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from rest_framework import serializers, viewsets, routers
from lamp_control.models import Lamp
class LampSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Lamp
fields = '__all__'
class LampViewSet(viewsets.ModelViewSet):
serializer_class = LampSerializer
queryset = Lamp.objects.all()
router = routers.DefaultRouter()
router.register(r'lamps', LampViewSet)
|
normal
|
{
"blob_id": "aff1d702e591efcfc0fc93150a3fbec532408137",
"index": 55,
"step-1": "<mask token>\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\nrouter = routers.DefaultRouter()\nrouter.register('lamps', LampViewSet)\n",
"step-4": "from rest_framework import serializers, viewsets, routers\nfrom lamp_control.models import Lamp\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\nrouter = routers.DefaultRouter()\nrouter.register('lamps', LampViewSet)\n",
"step-5": "from rest_framework import serializers, viewsets, routers\n\nfrom lamp_control.models import Lamp\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'lamps', LampViewSet)\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |