code (string, 13 to 6.09M chars) | order_type (2 classes) | original_example (dict) | step_ids (list, 1 to 5 ints)
---|---|---|---|
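Each row pairs a full source file (`code`, shown first) with its `order_type`, an `original_example` record whose `step-1` through `step-5` fields hold progressively less-masked versions of the file (`<mask token>` hides spans; rows tagged flexible also render the steps inline in the code column, separated by `<|reserved_special_token_1|>` with `<|reserved_special_token_0|>` as the mask), and the global `step_ids`. A minimal sketch of iterating records with this schema via the Hugging Face `datasets` library; the dataset path is a placeholder, not a real name:

```python
# Sketch only: "user/code-steps" is a placeholder path, not the actual dataset.
from datasets import load_dataset

ds = load_dataset("user/code-steps", split="train", streaming=True)
for record in ds:
    example = record["original_example"]
    # step-1 is the most masked variant, step-5 the full file; some records
    # have fewer steps (null entries), hence the guard.
    steps = [example[f"step-{k}"] for k in range(1, 6) if example[f"step-{k}"] is not None]
    print(record["order_type"], record["step_ids"], len(steps))
    break
```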
import os, time

def counter(count):                              # run in new process
    for i in range(count):
        time.sleep(1)                            # simulate real work
        print('[%s] => %s' % (os.getpid(), i))

import pdb; pdb.set_trace()
for i in range(5):
    pid = os.fork()
    if pid != 0:
        print('Process %d spawned' % pid)        # in parent: continue
    else:
        counter(5)                               # else in child/new process
        os._exit(0)                              # run function and exit

print('Main process exiting.')
|
normal
|
{
"blob_id": "fd564d09d7320fd444ed6eec7e51afa4d065ec4d",
"index": 6945,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef counter(count):\n for i in range(count):\n time.sleep(1)\n print('[%s] => %s' % (os.getpid(), i))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef counter(count):\n for i in range(count):\n time.sleep(1)\n print('[%s] => %s' % (os.getpid(), i))\n\n\n<mask token>\npdb.set_trace()\nfor i in range(5):\n pid = os.fork()\n if pid != 0:\n print('Process %d spawned' % pid)\n else:\n counter(5)\n os._exit(0)\nprint('Main process exiting.')\n",
"step-4": "import os, time\n\n\ndef counter(count):\n for i in range(count):\n time.sleep(1)\n print('[%s] => %s' % (os.getpid(), i))\n\n\nimport pdb\npdb.set_trace()\nfor i in range(5):\n pid = os.fork()\n if pid != 0:\n print('Process %d spawned' % pid)\n else:\n counter(5)\n os._exit(0)\nprint('Main process exiting.')\n",
"step-5": "import os, time\ndef counter(count): # run in new process\n for i in range(count):\n time.sleep(1) # simulate real work\n print('[%s] => %s' % (os.getpid(), i))\n\nimport pdb;pdb.set_trace()\nfor i in range(5):\n pid= os.fork()\n if pid != 0:\n print('Process %d spawned' % pid) # in parent: continue\n else:\n counter(5) # else in child/new process\n os._exit(0) # run function and exit\n\nprint('Main process exiting.') \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[0, 1, 2, 3, 4] |
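The record above forks five children, but the parent never waits on them, so they linger as zombies until the parent exits (and the interactive `pdb` breakpoint stops every run). A hedged variant of the same program that reaps its children; note `os.fork()` is POSIX-only:

```python
import os, time

def counter(count):
    for i in range(count):
        time.sleep(1)                      # simulate real work
        print('[%s] => %s' % (os.getpid(), i))

pids = []
for i in range(5):
    pid = os.fork()                        # POSIX-only
    if pid != 0:
        pids.append(pid)                   # parent: remember the child
    else:
        counter(5)                         # child: do the work...
        os._exit(0)                        # ...and exit without cleanup

for pid in pids:
    os.waitpid(pid, 0)                     # reap children so none are left as zombies
print('Main process exiting.')
```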
from tkinter import *

global math

root = Tk()
root.title("Calculator")

e = Entry(root, width=60, borderwidth=5)
e.grid(columnspan=3)

def button_click(number):
    #e.delete(0, END)
    current = e.get()
    e.delete(0, END)
    e.insert(0, str(current) + str(number))

def button_clear():
    e.delete(0, END)

def button_add():
    first_number = e.get()
    global f_num
    global math
    math = "addition"
    f_num = int(first_number)
    e.delete(0, END)

def button_equal():
    second_number = e.get()
    e.delete(0, END)
    if math == "addition":
        e.insert(0, f_num + int(second_number))
    if math == "subtraction":
        e.insert(0, f_num - int(second_number))
    if math == "multiplication":
        e.insert(0, f_num * int(second_number))
    if math == "division":
        e.insert(0, f_num / int(second_number))

def button_subtract():
    first_number = e.get()
    global f_num
    global math
    math = "subtraction"
    f_num = int(first_number)
    e.delete(0, END)

def button_multiply():
    first_number = e.get()
    global f_num
    global math
    math = "multiplication"
    f_num = int(first_number)
    e.delete(0, END)

def button_divide():
    first_number = e.get()
    global f_num
    global math
    math = "division"
    f_num = int(first_number)
    e.delete(0, END)

buttonClear = Button(root, width=52, height=8, text="Clear", command=button_clear)
buttonClear.grid(row=1, column=0, columnspan=3)

button7 = Button(root, width=16, height=8, text="7", command=lambda: button_click(7)).grid(row=3, column=0)
button8 = Button(root, width=16, height=8, text="8", command=lambda: button_click(8)).grid(row=3, column=1)
button9 = Button(root, width=16, height=8, text="9", command=lambda: button_click(9)).grid(row=3, column=2)
button4 = Button(root, width=16, height=8, text="4", command=lambda: button_click(4)).grid(row=4, column=0)
button5 = Button(root, width=16, height=8, text="5", command=lambda: button_click(5)).grid(row=4, column=1)
button6 = Button(root, width=16, height=8, text="6", command=lambda: button_click(6)).grid(row=4, column=2)
button1 = Button(root, width=16, height=8, text="1", command=lambda: button_click(1)).grid(row=5, column=0)
button2 = Button(root, width=16, height=8, text="2", command=lambda: button_click(2)).grid(row=5, column=1)
button3 = Button(root, width=16, height=8, text="3", command=lambda: button_click(3)).grid(row=5, column=2)
button0 = Button(root, width=16, height=8, text="0", command=lambda: button_click(0)).grid(row=6, column=0)
buttonEqual = Button(root, width=34, height=8, text="=", command=button_equal).grid(row=6, column=1, columnspan=2)
buttonPlus = Button(root, width=16, height=8, text="+", command=button_add).grid(row=7, column=0)
buttonSubtract = Button(root, width=16, height=8, text="-", command=button_subtract).grid(row=7, column=1)
buttonMul = Button(root, width=16, height=8, text="*", command=button_multiply).grid(row=7, column=2)

root.mainloop()
|
normal
|
{
"blob_id": "e6320bc1c344c87818a4063616db0c63b7b8be49",
"index": 1294,
"step-1": "<mask token>\n\n\ndef button_click(number):\n current = e.get()\n e.delete(0, END)\n e.insert(0, str(current) + str(number))\n\n\ndef button_clear():\n e.delete(0, END)\n\n\ndef button_add():\n first_number = e.get()\n global f_num\n global math\n math = 'addition'\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_equal():\n second_number = e.get()\n e.delete(0, END)\n if math == 'addition':\n e.insert(0, f_num + int(second_number))\n if math == 'subtraction':\n e.insert(0, f_num - int(second_number))\n if math == 'multiplication':\n e.insert(0, f_num * int(second_number))\n if math == 'division':\n e.insert(0, f_num / int(second_number))\n\n\ndef button_subtract():\n first_number = e.get()\n global f_num\n global math\n math = 'subtraction'\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_multiply():\n first_number = e.get()\n global f_num\n global math\n math = 'multiplication'\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_divide():\n first_number = e.get()\n global f_num\n global math\n math = 'division'\n f_num = int(first_number)\n e.delete(0, END)\n\n\n<mask token>\n",
"step-2": "<mask token>\nglobal math\n<mask token>\nroot.title('Calculator')\n<mask token>\ne.grid(columnspan=3)\n\n\ndef button_click(number):\n current = e.get()\n e.delete(0, END)\n e.insert(0, str(current) + str(number))\n\n\ndef button_clear():\n e.delete(0, END)\n\n\ndef button_add():\n first_number = e.get()\n global f_num\n global math\n math = 'addition'\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_equal():\n second_number = e.get()\n e.delete(0, END)\n if math == 'addition':\n e.insert(0, f_num + int(second_number))\n if math == 'subtraction':\n e.insert(0, f_num - int(second_number))\n if math == 'multiplication':\n e.insert(0, f_num * int(second_number))\n if math == 'division':\n e.insert(0, f_num / int(second_number))\n\n\ndef button_subtract():\n first_number = e.get()\n global f_num\n global math\n math = 'subtraction'\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_multiply():\n first_number = e.get()\n global f_num\n global math\n math = 'multiplication'\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_divide():\n first_number = e.get()\n global f_num\n global math\n math = 'division'\n f_num = int(first_number)\n e.delete(0, END)\n\n\n<mask token>\nbuttonClear.grid(row=1, column=0, columnspan=3)\n<mask token>\nroot.mainloop()\n",
"step-3": "<mask token>\nglobal math\nroot = Tk()\nroot.title('Calculator')\ne = Entry(root, width=60, borderwidth=5)\ne.grid(columnspan=3)\n\n\ndef button_click(number):\n current = e.get()\n e.delete(0, END)\n e.insert(0, str(current) + str(number))\n\n\ndef button_clear():\n e.delete(0, END)\n\n\ndef button_add():\n first_number = e.get()\n global f_num\n global math\n math = 'addition'\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_equal():\n second_number = e.get()\n e.delete(0, END)\n if math == 'addition':\n e.insert(0, f_num + int(second_number))\n if math == 'subtraction':\n e.insert(0, f_num - int(second_number))\n if math == 'multiplication':\n e.insert(0, f_num * int(second_number))\n if math == 'division':\n e.insert(0, f_num / int(second_number))\n\n\ndef button_subtract():\n first_number = e.get()\n global f_num\n global math\n math = 'subtraction'\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_multiply():\n first_number = e.get()\n global f_num\n global math\n math = 'multiplication'\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_divide():\n first_number = e.get()\n global f_num\n global math\n math = 'division'\n f_num = int(first_number)\n e.delete(0, END)\n\n\nbuttonClear = Button(root, width=52, height=8, text='Clear', command=\n button_clear)\nbuttonClear.grid(row=1, column=0, columnspan=3)\nbutton7 = Button(root, width=16, height=8, text='7', command=lambda :\n button_click(7)).grid(row=3, column=0)\nbutton8 = Button(root, width=16, height=8, text='8', command=lambda :\n button_click(8)).grid(row=3, column=1)\nbutton9 = Button(root, width=16, height=8, text='9', command=lambda :\n button_click(9)).grid(row=3, column=2)\nbutton4 = Button(root, width=16, height=8, text='4', command=lambda :\n button_click(4)).grid(row=4, column=0)\nbutton5 = Button(root, width=16, height=8, text='5', command=lambda :\n button_click(5)).grid(row=4, column=1)\nbutton6 = Button(root, width=16, height=8, text='6', command=lambda :\n button_click(6)).grid(row=4, column=2)\nbutton1 = Button(root, width=16, height=8, text='1', command=lambda :\n button_click(1)).grid(row=5, column=0)\nbutton2 = Button(root, width=16, height=8, text='2', command=lambda :\n button_click(2)).grid(row=5, column=1)\nbutton3 = Button(root, width=16, height=8, text='3', command=lambda :\n button_click(3)).grid(row=5, column=2)\nbutton0 = Button(root, width=16, height=8, text='0', command=lambda :\n button_click(0)).grid(row=6, column=0)\nbuttonEqual = Button(root, width=34, height=8, text='=', command=button_equal\n ).grid(row=6, column=1, columnspan=2)\nbuttonPlus = Button(root, width=16, height=8, text='+', command=button_add\n ).grid(row=7, column=0)\nbuttonSubtract = Button(root, width=16, height=8, text='-', command=\n button_subtract).grid(row=7, column=1)\nbuttonMul = Button(root, width=16, height=8, text='*', command=button_multiply\n ).grid(row=7, column=2)\nroot.mainloop()\n",
"step-4": "from tkinter import *\nglobal math\nroot = Tk()\nroot.title('Calculator')\ne = Entry(root, width=60, borderwidth=5)\ne.grid(columnspan=3)\n\n\ndef button_click(number):\n current = e.get()\n e.delete(0, END)\n e.insert(0, str(current) + str(number))\n\n\ndef button_clear():\n e.delete(0, END)\n\n\ndef button_add():\n first_number = e.get()\n global f_num\n global math\n math = 'addition'\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_equal():\n second_number = e.get()\n e.delete(0, END)\n if math == 'addition':\n e.insert(0, f_num + int(second_number))\n if math == 'subtraction':\n e.insert(0, f_num - int(second_number))\n if math == 'multiplication':\n e.insert(0, f_num * int(second_number))\n if math == 'division':\n e.insert(0, f_num / int(second_number))\n\n\ndef button_subtract():\n first_number = e.get()\n global f_num\n global math\n math = 'subtraction'\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_multiply():\n first_number = e.get()\n global f_num\n global math\n math = 'multiplication'\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_divide():\n first_number = e.get()\n global f_num\n global math\n math = 'division'\n f_num = int(first_number)\n e.delete(0, END)\n\n\nbuttonClear = Button(root, width=52, height=8, text='Clear', command=\n button_clear)\nbuttonClear.grid(row=1, column=0, columnspan=3)\nbutton7 = Button(root, width=16, height=8, text='7', command=lambda :\n button_click(7)).grid(row=3, column=0)\nbutton8 = Button(root, width=16, height=8, text='8', command=lambda :\n button_click(8)).grid(row=3, column=1)\nbutton9 = Button(root, width=16, height=8, text='9', command=lambda :\n button_click(9)).grid(row=3, column=2)\nbutton4 = Button(root, width=16, height=8, text='4', command=lambda :\n button_click(4)).grid(row=4, column=0)\nbutton5 = Button(root, width=16, height=8, text='5', command=lambda :\n button_click(5)).grid(row=4, column=1)\nbutton6 = Button(root, width=16, height=8, text='6', command=lambda :\n button_click(6)).grid(row=4, column=2)\nbutton1 = Button(root, width=16, height=8, text='1', command=lambda :\n button_click(1)).grid(row=5, column=0)\nbutton2 = Button(root, width=16, height=8, text='2', command=lambda :\n button_click(2)).grid(row=5, column=1)\nbutton3 = Button(root, width=16, height=8, text='3', command=lambda :\n button_click(3)).grid(row=5, column=2)\nbutton0 = Button(root, width=16, height=8, text='0', command=lambda :\n button_click(0)).grid(row=6, column=0)\nbuttonEqual = Button(root, width=34, height=8, text='=', command=button_equal\n ).grid(row=6, column=1, columnspan=2)\nbuttonPlus = Button(root, width=16, height=8, text='+', command=button_add\n ).grid(row=7, column=0)\nbuttonSubtract = Button(root, width=16, height=8, text='-', command=\n button_subtract).grid(row=7, column=1)\nbuttonMul = Button(root, width=16, height=8, text='*', command=button_multiply\n ).grid(row=7, column=2)\nroot.mainloop()\n",
"step-5": "from tkinter import *\r\n\r\nglobal math\r\n\r\nroot = Tk()\r\n\r\nroot.title(\"Calculator\")\r\n\r\ne = Entry(root,width=60,borderwidth=5)\r\ne.grid(columnspan=3)\r\n\r\ndef button_click(number):\r\n\t#e.delete(0, END)\r\n\tcurrent = e.get()\r\n\te.delete(0, END)\r\n\te.insert(0, str(current) + str(number))\r\n\r\ndef button_clear():\r\n\te.delete(0, END)\r\n\r\n\r\ndef button_add():\r\n\tfirst_number = e.get()\r\n\tglobal f_num\r\n\tglobal math\r\n\tmath = \"addition\"\r\n\tf_num = int(first_number)\r\n\te.delete(0, END)\r\n\r\ndef button_equal():\r\n\tsecond_number = e.get()\r\n\te.delete(0, END)\r\n\t\r\n\tif math == \"addition\":\r\n\t\te.insert(0, f_num + int(second_number))\r\n\r\n\tif math == \"subtraction\":\r\n\t\te.insert(0, f_num - int(second_number))\r\n\r\n\tif math == \"multiplication\":\r\n\t\te.insert(0, f_num * int(second_number))\r\n\r\n\tif math == \"division\":\r\n\t\te.insert(0, f_num / int(second_number))\r\n\r\ndef button_subtract():\r\n\tfirst_number = e.get()\r\n\tglobal f_num\r\n\tglobal math\r\n\tmath = \"subtraction\"\r\n\tf_num = int(first_number)\r\n\te.delete(0, END)\r\n\r\ndef button_multiply():\r\n\tfirst_number = e.get()\r\n\tglobal f_num\r\n\tglobal math\r\n\tmath = \"multiplication\"\r\n\tf_num = int(first_number)\r\n\te.delete(0, END)\r\n\r\ndef button_divide():\r\n\tfirst_number = e.get()\r\n\tglobal f_num\r\n\tglobal math\r\n\tmath = \"division\"\r\n\tf_num = int(first_number)\r\n\te.delete(0, END)\r\n\r\n\r\n\r\nbuttonClear = Button(root,width=52,height=8,text=\"Clear\",command=button_clear)\r\nbuttonClear.grid(row=1,column=0,columnspan=3)\r\n\r\nbutton7 = Button(root,width=16,height=8,text=\"7\",command=lambda: button_click(7)).grid(row=3,column=0)\r\nbutton8 = Button(root,width=16,height=8,text=\"8\",command=lambda: button_click(8)).grid(row=3,column=1)\r\nbutton9 = Button(root,width=16,height=8,text=\"9\",command=lambda: button_click(9)).grid(row=3,column=2)\r\n\r\nbutton4 = Button(root,width=16,height=8,text=\"4\",command=lambda: button_click(4)).grid(row=4,column=0)\r\nbutton5 = Button(root,width=16,height=8,text=\"5\",command=lambda: button_click(5)).grid(row=4,column=1)\r\nbutton6 = Button(root,width=16,height=8,text=\"6\",command=lambda: button_click(6)).grid(row=4,column=2)\r\n\r\nbutton1 = Button(root,width=16,height=8,text=\"1\",command=lambda: button_click(1)).grid(row=5,column=0)\r\nbutton2 = Button(root,width=16,height=8,text=\"2\",command=lambda: button_click(2)).grid(row=5,column=1)\r\nbutton3 = Button(root,width=16,height=8,text=\"3\",command=lambda: button_click(3)).grid(row=5,column=2)\r\n\r\n\r\nbutton0 = Button(root,width=16,height=8,text=\"0\",command=lambda: button_click(0)).grid(row=6,column=0)\r\nbuttonEqual = Button(root,width=34,height=8,text=\"=\",command=button_equal).grid(row=6,column=1,columnspan=2)\r\n\r\nbuttonPlus = Button(root,width=16,height=8,text=\"+\",command=button_add).grid(row=7,column=0)\r\nbuttonSubtract = Button(root,width=16,height=8,text=\"-\",command=button_subtract).grid(row=7,column=1)\r\nbuttonMul = Button(root,width=16,height=8,text=\"*\",command=button_multiply).grid(row=7,column=2)\r\n\r\nroot.mainloop()",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[7, 8, 9, 10, 11] |
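Note that this record defines `button_divide` but never places a "/" button on the grid, so division is unreachable from the UI. A minimal, hypothetical wiring (the row placement is a guess, not from the record):

```python
# Hypothetical: adds the missing division button below the operator row.
buttonDiv = Button(root, width=16, height=8, text="/",
                   command=button_divide).grid(row=8, column=0)
```

Separately, `.grid()` returns `None`, so all the `buttonN = Button(...).grid(...)` assignments in the record bind `None`; calling `grid` on its own line, as done for `buttonClear`, is the cleaner pattern.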
<|reserved_special_token_0|>


def train_model(model_name):
    if model_name == 'LinearRegression':
        model = LinearRegression()
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
    if model_name == 'Lasso':
        model = Lasso(alpha=1)
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
    if model_name == 'Ridge':
        model = Ridge(alpha=1)
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
    if model_name == 'tree':
        model = tree.DecisionTreeRegressor()
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)


<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def train_model(model_name):
    if model_name == 'LinearRegression':
        model = LinearRegression()
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
    if model_name == 'Lasso':
        model = Lasso(alpha=1)
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
    if model_name == 'Ridge':
        model = Ridge(alpha=1)
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
    if model_name == 'tree':
        model = tree.DecisionTreeRegressor()
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)


if __name__ == '__main__':
    model_chosen = 'Lasso'
    train_model(model_chosen)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
X = pk.load(file=open('../data/temp/train.pkl', 'rb'))
y = pk.load(file=open('../data/temp/label.pkl', 'rb'))
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)


def train_model(model_name):
    if model_name == 'LinearRegression':
        model = LinearRegression()
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
    if model_name == 'Lasso':
        model = Lasso(alpha=1)
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
    if model_name == 'Ridge':
        model = Ridge(alpha=1)
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
    if model_name == 'tree':
        model = tree.DecisionTreeRegressor()
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)


if __name__ == '__main__':
    model_chosen = 'Lasso'
    train_model(model_chosen)
<|reserved_special_token_1|>
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn import tree
import pickle as pk
X = pk.load(file=open('../data/temp/train.pkl', 'rb'))
y = pk.load(file=open('../data/temp/label.pkl', 'rb'))
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)


def train_model(model_name):
    if model_name == 'LinearRegression':
        model = LinearRegression()
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
    if model_name == 'Lasso':
        model = Lasso(alpha=1)
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
    if model_name == 'Ridge':
        model = Ridge(alpha=1)
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
    if model_name == 'tree':
        model = tree.DecisionTreeRegressor()
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)


if __name__ == '__main__':
    model_chosen = 'Lasso'
    train_model(model_chosen)
<|reserved_special_token_1|>
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn import tree
import pickle as pk

X = pk.load(file=open('../data/temp/train.pkl', 'rb'))
y = pk.load(file=open('../data/temp/label.pkl', 'rb'))

X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)


def train_model(model_name):
    if model_name == "LinearRegression":
        model = LinearRegression()
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)

    if model_name == "Lasso":
        model = Lasso(alpha=1)
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)

    if model_name == "Ridge":
        model = Ridge(alpha=1)
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)

    if model_name == "tree":
        model = tree.DecisionTreeRegressor()
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)


if __name__ == '__main__':
    model_chosen = "Lasso"
    train_model(model_chosen)
|
flexible
|
{
"blob_id": "539726df0e631c7a8edabf50fd739ee0497e3e97",
"index": 5557,
"step-1": "<mask token>\n\n\ndef train_model(model_name):\n if model_name == 'LinearRegression':\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Lasso':\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Ridge':\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'tree':\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef train_model(model_name):\n if model_name == 'LinearRegression':\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Lasso':\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Ridge':\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'tree':\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\nif __name__ == '__main__':\n model_chosen = 'Lasso'\n train_model(model_chosen)\n",
"step-3": "<mask token>\nX = pk.load(file=open('../data/temp/train.pkl', 'rb'))\ny = pk.load(file=open('../data/temp/label.pkl', 'rb'))\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)\n\n\ndef train_model(model_name):\n if model_name == 'LinearRegression':\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Lasso':\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Ridge':\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'tree':\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\nif __name__ == '__main__':\n model_chosen = 'Lasso'\n train_model(model_chosen)\n",
"step-4": "from sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression, Lasso, Ridge\nfrom sklearn import tree\nimport pickle as pk\nX = pk.load(file=open('../data/temp/train.pkl', 'rb'))\ny = pk.load(file=open('../data/temp/label.pkl', 'rb'))\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)\n\n\ndef train_model(model_name):\n if model_name == 'LinearRegression':\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Lasso':\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'Ridge':\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n if model_name == 'tree':\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\nif __name__ == '__main__':\n model_chosen = 'Lasso'\n train_model(model_chosen)\n",
"step-5": "from sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression, Lasso, Ridge\nfrom sklearn import tree\nimport pickle as pk\n\nX = pk.load(file=open('../data/temp/train.pkl', 'rb'))\ny = pk.load(file=open('../data/temp/label.pkl', 'rb'))\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)\n\n\ndef train_model(model_name):\n if model_name == \"LinearRegression\":\n model = LinearRegression()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n if model_name == \"Lasso\":\n model = Lasso(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n if model_name == \"Ridge\":\n model = Ridge(alpha=1)\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n if model_name == \"tree\":\n model = tree.DecisionTreeRegressor()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(score)\n\n\nif __name__ == '__main__':\n model_chosen = \"Lasso\"\n train_model(model_chosen)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[1, 2, 3, 4, 5] |
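The if-chain in this record silently does nothing for an unknown model name, and the unseeded `train_test_split` makes the printed scores non-reproducible. A hedged restructuring of the same selection logic as a registry dict; the function signature is changed here so the split lives with the call:

```python
# Sketch: same models as the record, restructured; not the record's own code.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn import tree

MODELS = {
    'LinearRegression': LinearRegression,
    'Lasso': lambda: Lasso(alpha=1),
    'Ridge': lambda: Ridge(alpha=1),
    'tree': tree.DecisionTreeRegressor,
}

def train_model(model_name, X, y):
    # seeded split so the reported score is reproducible
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.7, random_state=0)
    model = MODELS[model_name]()   # raises KeyError instead of a silent no-op
    model.fit(X_train, y_train)
    print(model.score(X_test, y_test))
    return model
```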
#-*- coding:UTF-8 -*-
year = int(input('请输入一个年份:'))
"""
if(year % 4) == 0:
    if(year % 100) == 0:
        if(year % 400) == 0:
            print('{0}是润年'.format(year))
        else:
            print('{0}不是润年'.format(year))
    else:
        print('{0}是润年'.format(year))
else:
    print('{0}不是润年'.format(year))
"""
if(year%4)==0 and (year%100)!=0 or (year%400)==0:
    print('{0}是润年'.format(year))
else:
    print('{0}不是润年'.format(year))
|
normal
|
{
"blob_id": "78178ec8474a3deb876ab7d3950cd427d7a795d5",
"index": 2218,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif year % 4 == 0 and year % 100 != 0 or year % 400 == 0:\n print('{0}是润年'.format(year))\nelse:\n print('{0}不是润年'.format(year))\n",
"step-3": "year = int(input('请输入一个年份:'))\n<mask token>\nif year % 4 == 0 and year % 100 != 0 or year % 400 == 0:\n print('{0}是润年'.format(year))\nelse:\n print('{0}不是润年'.format(year))\n",
"step-4": "#-*- coding:UTF-8 -*- \n\nyear = int(input('请输入一个年份:'))\n\"\"\"\nif(year % 4) == 0:\n if(year % 100) == 0:\n if(year % 400) == 0:\n print('{0}是润年'.format(year))\n else:\n print('{0}不是润年'.format(year))\n else:\n print('{0}是润年'.format(year))\nelse:\n print('{0}不是润年'.format(year)) \n\n\"\"\"\nif(year%4)==0 and (year%100)!=0 or (year%400)==0:\n print('{0}是润年'.format(year))\nelse:\n print('{0}不是润年'.format(year)) \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[0, 1, 2, 3] |
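The record's strings are Chinese: '请输入一个年份:' prompts for a year, and '{0}是润年' / '{0}不是润年' report that the year is / is not a leap year (润年 is a variant spelling of 闰年). The boolean condition matches the standard Gregorian rule, which can be checked against the stdlib:

```python
import calendar

for year in (1900, 2000, 2023, 2024):
    is_leap = (year % 4 == 0 and year % 100 != 0) or year % 400 == 0
    assert is_leap == calendar.isleap(year)   # agrees with the stdlib rule
    print(year, 'is a leap year' if is_leap else 'is not a leap year')
```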
<|reserved_special_token_0|>


class PixelCNN(nn.Module):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class PixelCNN(nn.Module):
    <|reserved_special_token_0|>

    def forward(self, x):
        """
        Args:
            x: [batch_size, channel, height, width]
        Return:
            out [batch_size, channel, height, width, 256]
        """
        batch_size, c_in, height, width = x.size()
        x = self.MaskAConv(x)
        x = self.MaskBConv(x)
        x = self.out(x)
        x = x.view(batch_size, c_in, self.discrete_channel, height, width)
        x = x.permute(0, 1, 3, 4, 2)
        return x
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class PixelCNN(nn.Module):

    def __init__(self, n_channel=3, h=128, discrete_channel=256):
        """PixelCNN Model"""
        super(PixelCNN, self).__init__()
        self.discrete_channel = discrete_channel
        self.MaskAConv = maskAConv(n_channel, 2 * h, k_size=7, stride=1, pad=3)
        MaskBConv = []
        for i in range(15):
            MaskBConv.append(MaskBConvBlock(h, k_size=3, stride=1, pad=1))
        self.MaskBConv = nn.Sequential(*MaskBConv)
        self.out = nn.Sequential(nn.ReLU(), nn.Conv2d(2 * h, 1024,
            kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(1024), nn.
            ReLU(), nn.Conv2d(1024, n_channel * discrete_channel,
            kernel_size=1, stride=1, padding=0))

    def forward(self, x):
        """
        Args:
            x: [batch_size, channel, height, width]
        Return:
            out [batch_size, channel, height, width, 256]
        """
        batch_size, c_in, height, width = x.size()
        x = self.MaskAConv(x)
        x = self.MaskBConv(x)
        x = self.out(x)
        x = x.view(batch_size, c_in, self.discrete_channel, height, width)
        x = x.permute(0, 1, 3, 4, 2)
        return x
<|reserved_special_token_1|>
import torch.nn as nn
from layers import maskAConv, MaskBConvBlock


class PixelCNN(nn.Module):

    def __init__(self, n_channel=3, h=128, discrete_channel=256):
        """PixelCNN Model"""
        super(PixelCNN, self).__init__()
        self.discrete_channel = discrete_channel
        self.MaskAConv = maskAConv(n_channel, 2 * h, k_size=7, stride=1, pad=3)
        MaskBConv = []
        for i in range(15):
            MaskBConv.append(MaskBConvBlock(h, k_size=3, stride=1, pad=1))
        self.MaskBConv = nn.Sequential(*MaskBConv)
        self.out = nn.Sequential(nn.ReLU(), nn.Conv2d(2 * h, 1024,
            kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(1024), nn.
            ReLU(), nn.Conv2d(1024, n_channel * discrete_channel,
            kernel_size=1, stride=1, padding=0))

    def forward(self, x):
        """
        Args:
            x: [batch_size, channel, height, width]
        Return:
            out [batch_size, channel, height, width, 256]
        """
        batch_size, c_in, height, width = x.size()
        x = self.MaskAConv(x)
        x = self.MaskBConv(x)
        x = self.out(x)
        x = x.view(batch_size, c_in, self.discrete_channel, height, width)
        x = x.permute(0, 1, 3, 4, 2)
        return x
<|reserved_special_token_1|>
import torch.nn as nn
from layers import maskAConv, MaskBConvBlock


class PixelCNN(nn.Module):
    def __init__(self, n_channel=3, h=128, discrete_channel=256):
        """PixelCNN Model"""
        super(PixelCNN, self).__init__()

        self.discrete_channel = discrete_channel

        self.MaskAConv = maskAConv(n_channel, 2 * h, k_size=7, stride=1, pad=3)
        MaskBConv = []
        for i in range(15):
            MaskBConv.append(MaskBConvBlock(h, k_size=3, stride=1, pad=1))
        self.MaskBConv = nn.Sequential(*MaskBConv)

        # 1x1 conv to 3x256 channels
        self.out = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(2 * h, 1024, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(1024),
            nn.ReLU(),
            nn.Conv2d(1024, n_channel * discrete_channel, kernel_size=1, stride=1, padding=0))

    def forward(self, x):
        """
        Args:
            x: [batch_size, channel, height, width]
        Return:
            out [batch_size, channel, height, width, 256]
        """
        batch_size, c_in, height, width = x.size()

        # [batch_size, 2h, 32, 32]
        x = self.MaskAConv(x)

        # [batch_size, 2h, 32, 32]
        x = self.MaskBConv(x)

        # [batch_size, 3x256, 32, 32]
        x = self.out(x)

        # [batch_size, 3, 256, 32, 32]
        x = x.view(batch_size, c_in, self.discrete_channel, height, width)

        # [batch_size, 3, 32, 32, 256]
        x = x.permute(0, 1, 3, 4, 2)

        return x
|
flexible
|
{
"blob_id": "3185b6b1902099caed66ce6f97cd1b9940261fc1",
"index": 7533,
"step-1": "<mask token>\n\n\nclass PixelCNN(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PixelCNN(nn.Module):\n <mask token>\n\n def forward(self, x):\n \"\"\"\n Args:\n x: [batch_size, channel, height, width]\n Return:\n out [batch_size, channel, height, width, 256]\n \"\"\"\n batch_size, c_in, height, width = x.size()\n x = self.MaskAConv(x)\n x = self.MaskBConv(x)\n x = self.out(x)\n x = x.view(batch_size, c_in, self.discrete_channel, height, width)\n x = x.permute(0, 1, 3, 4, 2)\n return x\n",
"step-3": "<mask token>\n\n\nclass PixelCNN(nn.Module):\n\n def __init__(self, n_channel=3, h=128, discrete_channel=256):\n \"\"\"PixelCNN Model\"\"\"\n super(PixelCNN, self).__init__()\n self.discrete_channel = discrete_channel\n self.MaskAConv = maskAConv(n_channel, 2 * h, k_size=7, stride=1, pad=3)\n MaskBConv = []\n for i in range(15):\n MaskBConv.append(MaskBConvBlock(h, k_size=3, stride=1, pad=1))\n self.MaskBConv = nn.Sequential(*MaskBConv)\n self.out = nn.Sequential(nn.ReLU(), nn.Conv2d(2 * h, 1024,\n kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(1024), nn.\n ReLU(), nn.Conv2d(1024, n_channel * discrete_channel,\n kernel_size=1, stride=1, padding=0))\n\n def forward(self, x):\n \"\"\"\n Args:\n x: [batch_size, channel, height, width]\n Return:\n out [batch_size, channel, height, width, 256]\n \"\"\"\n batch_size, c_in, height, width = x.size()\n x = self.MaskAConv(x)\n x = self.MaskBConv(x)\n x = self.out(x)\n x = x.view(batch_size, c_in, self.discrete_channel, height, width)\n x = x.permute(0, 1, 3, 4, 2)\n return x\n",
"step-4": "import torch.nn as nn\nfrom layers import maskAConv, MaskBConvBlock\n\n\nclass PixelCNN(nn.Module):\n\n def __init__(self, n_channel=3, h=128, discrete_channel=256):\n \"\"\"PixelCNN Model\"\"\"\n super(PixelCNN, self).__init__()\n self.discrete_channel = discrete_channel\n self.MaskAConv = maskAConv(n_channel, 2 * h, k_size=7, stride=1, pad=3)\n MaskBConv = []\n for i in range(15):\n MaskBConv.append(MaskBConvBlock(h, k_size=3, stride=1, pad=1))\n self.MaskBConv = nn.Sequential(*MaskBConv)\n self.out = nn.Sequential(nn.ReLU(), nn.Conv2d(2 * h, 1024,\n kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(1024), nn.\n ReLU(), nn.Conv2d(1024, n_channel * discrete_channel,\n kernel_size=1, stride=1, padding=0))\n\n def forward(self, x):\n \"\"\"\n Args:\n x: [batch_size, channel, height, width]\n Return:\n out [batch_size, channel, height, width, 256]\n \"\"\"\n batch_size, c_in, height, width = x.size()\n x = self.MaskAConv(x)\n x = self.MaskBConv(x)\n x = self.out(x)\n x = x.view(batch_size, c_in, self.discrete_channel, height, width)\n x = x.permute(0, 1, 3, 4, 2)\n return x\n",
"step-5": "import torch.nn as nn\nfrom layers import maskAConv, MaskBConvBlock\n\n\nclass PixelCNN(nn.Module):\n def __init__(self, n_channel=3, h=128, discrete_channel=256):\n \"\"\"PixelCNN Model\"\"\"\n super(PixelCNN, self).__init__()\n\n self.discrete_channel = discrete_channel\n\n self.MaskAConv = maskAConv(n_channel, 2 * h, k_size=7, stride=1, pad=3)\n MaskBConv = []\n for i in range(15):\n MaskBConv.append(MaskBConvBlock(h, k_size=3, stride=1, pad=1))\n self.MaskBConv = nn.Sequential(*MaskBConv)\n\n # 1x1 conv to 3x256 channels\n self.out = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(2 * h, 1024, kernel_size=1, stride=1, padding=0),\n nn.BatchNorm2d(1024),\n nn.ReLU(),\n nn.Conv2d(1024, n_channel * discrete_channel, kernel_size=1, stride=1, padding=0))\n\n def forward(self, x):\n \"\"\"\n Args:\n x: [batch_size, channel, height, width]\n Return:\n out [batch_size, channel, height, width, 256]\n \"\"\"\n batch_size, c_in, height, width = x.size()\n\n # [batch_size, 2h, 32, 32]\n x = self.MaskAConv(x)\n\n # [batch_size, 2h, 32, 32]\n x = self.MaskBConv(x)\n\n # [batch_size, 3x256, 32, 32]\n x = self.out(x)\n\n # [batch_size, 3, 256, 32, 32]\n x = x.view(batch_size, c_in, self.discrete_channel, height, width)\n\n # [batch_size, 3, 32, 32, 256]\n x = x.permute(0, 1, 3, 4, 2)\n\n return x\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[1, 2, 3, 4, 5] |
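`maskAConv` and `MaskBConvBlock` come from the repo's own `layers` module and are not shown in the record. Since `forward` returns per-subpixel logits of shape `[batch, 3, 32, 32, 256]`, a plausible training step, an assumption rather than the repo's code, is 256-way cross-entropy against the integer pixel values:

```python
import torch
import torch.nn.functional as F

# Hypothetical training step, assuming the PixelCNN class above is importable.
model = PixelCNN()                  # needs the repo's `layers` module
images = torch.rand(8, 3, 32, 32)   # stand-in batch of floats in [0, 1]
logits = model(images)              # [B, 3, 32, 32, 256]
targets = (images * 255).long()     # integer pixel classes 0..255
# cross_entropy wants the class dim in position 1: [B, 256, 3, 32, 32]
loss = F.cross_entropy(logits.permute(0, 4, 1, 2, 3), targets)
loss.backward()
```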
from .models import Stock
from .serializers import StockSerializer
from rest_framework import generics


class StockListCreate(generics.ListCreateAPIView):
    queryset = Stock.objects.all()
    serializer_class = StockSerializer
|
normal
|
{
"blob_id": "9adf18b3a65bf58dd4c22a6fe026d0dd868533fb",
"index": 5468,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass StockListCreate(generics.ListCreateAPIView):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass StockListCreate(generics.ListCreateAPIView):\n queryset = Stock.objects.all()\n serializer_class = StockSerializer\n",
"step-4": "from .models import Stock\nfrom .serializers import StockSerializer\nfrom rest_framework import generics\n\n\nclass StockListCreate(generics.ListCreateAPIView):\n queryset = Stock.objects.all()\n serializer_class = StockSerializer\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[0, 1, 2, 3] |
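A generic view like this still needs a URL route to be reachable. A minimal, hypothetical urls.py wiring (the module layout, path, and name are assumptions, not from the record):

```python
# Hypothetical urls.py for the app that defines StockListCreate.
from django.urls import path
from .views import StockListCreate

urlpatterns = [
    path('api/stocks/', StockListCreate.as_view(), name='stock-list-create'),
]
```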
times = np.linspace(0.0, 10.0, 100)
result = mesolve(H, psi0, times, [np.sqrt(0.05) * sigmax()], [sigmaz(), sigmay()])
fig, ax = plt.subplots()
ax.plot(times, result.expect[0]) # doctest: +SKIP
ax.plot(times, result.expect[1]) # doctest: +SKIP
ax.set_xlabel('Time') # doctest: +SKIP
ax.set_ylabel('Expectation values') # doctest: +SKIP
ax.legend(("Sigma-Z", "Sigma-Y")) # doctest: +SKIP
plt.show() # doctest: +SKIP
|
normal
|
{
"blob_id": "8474205d49aef2d18755fc1a25a82718962f4120",
"index": 6912,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nax.plot(times, result.expect[0])\nax.plot(times, result.expect[1])\nax.set_xlabel('Time')\nax.set_ylabel('Expectation values')\nax.legend(('Sigma-Z', 'Sigma-Y'))\nplt.show()\n",
"step-3": "times = np.linspace(0.0, 10.0, 100)\nresult = mesolve(H, psi0, times, [np.sqrt(0.05) * sigmax()], [sigmaz(),\n sigmay()])\nfig, ax = plt.subplots()\nax.plot(times, result.expect[0])\nax.plot(times, result.expect[1])\nax.set_xlabel('Time')\nax.set_ylabel('Expectation values')\nax.legend(('Sigma-Z', 'Sigma-Y'))\nplt.show()\n",
"step-4": "times = np.linspace(0.0, 10.0, 100)\nresult = mesolve(H, psi0, times, [np.sqrt(0.05) * sigmax()], [sigmaz(), sigmay()])\nfig, ax = plt.subplots()\nax.plot(times, result.expect[0]) # doctest: +SKIP\nax.plot(times, result.expect[1]) # doctest: +SKIP\nax.set_xlabel('Time') # doctest: +SKIP\nax.set_ylabel('Expectation values') # doctest: +SKIP\nax.legend((\"Sigma-Z\", \"Sigma-Y\")) # doctest: +SKIP\nplt.show() # doctest: +SKIP\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[0, 1, 2, 3] |
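This snippet assumes a QuTiP session where `np`, `plt`, `H`, and `psi0` already exist. A minimal preamble in the spirit of the QuTiP master-equation example; the Hamiltonian choice is an assumption, not part of the record:

```python
import numpy as np
import matplotlib.pyplot as plt
from qutip import mesolve, basis, sigmax, sigmay, sigmaz

H = 2 * np.pi * 0.1 * sigmax()   # assumed: weakly driven qubit, as in the docs example
psi0 = basis(2, 0)               # start in the ground state |0>
```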
from enum import Enum
from typing import List, Optional

from pydantic import BaseModel


class Sizes(str, Enum):
    one_gram = "1g"
    two_and_half_gram = "2.5g"
    one_ounce = "1oz"
    five_ounce = "5oz"
    ten_ounce = "10oz"


class PriceSort(str, Enum):
    gte = "gte"
    lte = "lte"


class Metals(str, Enum):
    gold = "gold"
    silver = "silver"


class PriceFilter(BaseModel):
    type: PriceSort
    price: float


class ProductSearch(BaseModel):
    price: Optional[PriceFilter]
    metals: Optional[List[Metals]]
    size: Optional[Sizes]
|
normal
|
{
"blob_id": "442c6c4894fc01d0f8142f3dcedfd51ba57aedd1",
"index": 3304,
"step-1": "<mask token>\n\n\nclass Metals(str, Enum):\n gold = 'gold'\n silver = 'silver'\n\n\nclass PriceFilter(BaseModel):\n type: PriceSort\n price: float\n\n\nclass ProductSearch(BaseModel):\n price: Optional[PriceFilter]\n metals: Optional[List[Metals]]\n size: Optional[Sizes]\n",
"step-2": "<mask token>\n\n\nclass PriceSort(str, Enum):\n <mask token>\n <mask token>\n\n\nclass Metals(str, Enum):\n gold = 'gold'\n silver = 'silver'\n\n\nclass PriceFilter(BaseModel):\n type: PriceSort\n price: float\n\n\nclass ProductSearch(BaseModel):\n price: Optional[PriceFilter]\n metals: Optional[List[Metals]]\n size: Optional[Sizes]\n",
"step-3": "<mask token>\n\n\nclass Sizes(str, Enum):\n one_gram = '1g'\n two_and_half_gram = '2.5g'\n one_ounce = '1oz'\n five_ounce = '5oz'\n ten_ounce = '10oz'\n\n\nclass PriceSort(str, Enum):\n gte = 'gte'\n lte = 'lte'\n\n\nclass Metals(str, Enum):\n gold = 'gold'\n silver = 'silver'\n\n\nclass PriceFilter(BaseModel):\n type: PriceSort\n price: float\n\n\nclass ProductSearch(BaseModel):\n price: Optional[PriceFilter]\n metals: Optional[List[Metals]]\n size: Optional[Sizes]\n",
"step-4": "from enum import Enum\nfrom typing import List, Optional\nfrom pydantic import BaseModel\n\n\nclass Sizes(str, Enum):\n one_gram = '1g'\n two_and_half_gram = '2.5g'\n one_ounce = '1oz'\n five_ounce = '5oz'\n ten_ounce = '10oz'\n\n\nclass PriceSort(str, Enum):\n gte = 'gte'\n lte = 'lte'\n\n\nclass Metals(str, Enum):\n gold = 'gold'\n silver = 'silver'\n\n\nclass PriceFilter(BaseModel):\n type: PriceSort\n price: float\n\n\nclass ProductSearch(BaseModel):\n price: Optional[PriceFilter]\n metals: Optional[List[Metals]]\n size: Optional[Sizes]\n",
"step-5": "from enum import Enum\nfrom typing import List, Optional\nfrom pydantic import BaseModel\n\n\nclass Sizes(str, Enum):\n one_gram = \"1g\"\n two_and_half_gram = \"2.5g\"\n one_ounce = \"1oz\"\n five_ounce = \"5oz\"\n ten_ounce = \"10oz\"\n\n\nclass PriceSort(str, Enum):\n gte = \"gte\"\n lte = \"lte\"\n\n\nclass Metals(str, Enum):\n gold = \"gold\"\n silver = \"silver\"\n\n\nclass PriceFilter(BaseModel):\n type: PriceSort\n price: float\n\n\nclass ProductSearch(BaseModel):\n price: Optional[PriceFilter]\n metals: Optional[List[Metals]]\n size: Optional[Sizes]\n",
"step-ids": [
4,
5,
8,
9,
10
]
}
|
[4, 5, 8, 9, 10] |
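A short usage sketch in pydantic v1 style, matching the bare `Optional` fields above: nested dicts and plain strings are coerced into the models and enums (the example values are illustrative only).

```python
search = ProductSearch(
    price={'type': 'lte', 'price': 1800.0},  # dict coerced into PriceFilter
    metals=['gold', 'silver'],               # strings coerced into Metals members
    size='1oz',                              # string coerced into Sizes.one_ounce
)
print(search.json())
```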
#!/usr/bin/env python3

import unittest
import solution

class TestMethods(unittest.TestCase):
    def LinkedListFromArray(self, values):
        if len(values) > 0:
            headNode = solution.ListNode(values[0], None)
            tailPtr = headNode

            if len(values) > 1:
                for value in values[1:]:
                    tailPtr.setNext(solution.ListNode(value))
                    tailPtr = tailPtr.getNext()

            return headNode
        else:
            return None

    def printLinkedList(self, headNode):
        print(self.linkedListToArray(headNode))

    def linkedListToArray(self, headNode):
        result = []
        current = headNode

        while current:
            result.append(current.getValue())
            current = current.getNext()

        return result

    def checkLinkedListsAreEqual(self, headNodeA, headNodeB):
        valuesA = self.linkedListToArray(headNodeA)
        valuesB = self.linkedListToArray(headNodeB)

        return valuesA == valuesB

    def test_example_1(self):
        sol = solution.Solution()
        l1 = self.LinkedListFromArray([2, 4, 3])
        l2 = self.LinkedListFromArray([5, 6, 4])

        expected = [7, 0, 8]

        self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)), expected)

    def test_example_steve7411(self):
        sol = solution.Solution()
        l1 = self.LinkedListFromArray([9])
        l2 = self.LinkedListFromArray([1])

        expected = [0, 1]

        self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)), expected)

    def test_example_2(self):
        sol = solution.Solution()
        l1 = self.LinkedListFromArray([0])
        l2 = self.LinkedListFromArray([0])

        expected = [0]

        self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)), expected)

    def test_example_3(self):  # renamed: a second test_example_2 would shadow the test above
        sol = solution.Solution()
        l1 = self.LinkedListFromArray([9, 9, 9, 9, 9, 9, 9])
        l2 = self.LinkedListFromArray([9, 9, 9, 9])

        expected = [8, 9, 9, 9, 0, 0, 0, 1]

        self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)), expected)

if __name__ == '__main__':
    unittest.main()
|
normal
|
{
"blob_id": "2a3f9c4518df337cfc5e4b1816e7b2b4af62c101",
"index": 8020,
"step-1": "<mask token>\n\n\nclass TestMethods(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def checkLinkedListsAreEqual(self, headNodeA, headNodeB):\n valuesA = self.linkedListToArray(headNodeA)\n valuesB = self.linkedListToArray(headNodeB)\n return valuesA == valuesB\n\n def test_example_1(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([2, 4, 3])\n l2 = self.LinkedListFromArray([5, 6, 4])\n expected = [7, 0, 8]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n def test_example_steve7411(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([9])\n l2 = self.LinkedListFromArray([1])\n expected = [0, 1]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n def test_example_2(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([0])\n l2 = self.LinkedListFromArray([0])\n expected = [0]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n def test_example_2(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([9, 9, 9, 9, 9, 9, 9])\n l2 = self.LinkedListFromArray([9, 9, 9, 9])\n expected = [8, 9, 9, 9, 0, 0, 0, 1]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMethods(unittest.TestCase):\n <mask token>\n\n def printLinkedList(self, headNode):\n print(self.linkedListToArray(headNode))\n\n def linkedListToArray(self, headNode):\n result = []\n current = headNode\n while current:\n result.append(current.getValue())\n current = current.getNext()\n return result\n\n def checkLinkedListsAreEqual(self, headNodeA, headNodeB):\n valuesA = self.linkedListToArray(headNodeA)\n valuesB = self.linkedListToArray(headNodeB)\n return valuesA == valuesB\n\n def test_example_1(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([2, 4, 3])\n l2 = self.LinkedListFromArray([5, 6, 4])\n expected = [7, 0, 8]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n def test_example_steve7411(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([9])\n l2 = self.LinkedListFromArray([1])\n expected = [0, 1]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n def test_example_2(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([0])\n l2 = self.LinkedListFromArray([0])\n expected = [0]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n def test_example_2(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([9, 9, 9, 9, 9, 9, 9])\n l2 = self.LinkedListFromArray([9, 9, 9, 9])\n expected = [8, 9, 9, 9, 0, 0, 0, 1]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestMethods(unittest.TestCase):\n\n def LinkedListFromArray(self, values):\n if len(values) > 0:\n headNode = solution.ListNode(values[0], None)\n tailPtr = headNode\n if len(values) > 1:\n for value in values[1:]:\n tailPtr.setNext(solution.ListNode(value))\n tailPtr = tailPtr.getNext()\n return headNode\n else:\n return None\n\n def printLinkedList(self, headNode):\n print(self.linkedListToArray(headNode))\n\n def linkedListToArray(self, headNode):\n result = []\n current = headNode\n while current:\n result.append(current.getValue())\n current = current.getNext()\n return result\n\n def checkLinkedListsAreEqual(self, headNodeA, headNodeB):\n valuesA = self.linkedListToArray(headNodeA)\n valuesB = self.linkedListToArray(headNodeB)\n return valuesA == valuesB\n\n def test_example_1(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([2, 4, 3])\n l2 = self.LinkedListFromArray([5, 6, 4])\n expected = [7, 0, 8]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n def test_example_steve7411(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([9])\n l2 = self.LinkedListFromArray([1])\n expected = [0, 1]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n def test_example_2(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([0])\n l2 = self.LinkedListFromArray([0])\n expected = [0]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n def test_example_2(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([9, 9, 9, 9, 9, 9, 9])\n l2 = self.LinkedListFromArray([9, 9, 9, 9])\n expected = [8, 9, 9, 9, 0, 0, 0, 1]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TestMethods(unittest.TestCase):\n\n def LinkedListFromArray(self, values):\n if len(values) > 0:\n headNode = solution.ListNode(values[0], None)\n tailPtr = headNode\n if len(values) > 1:\n for value in values[1:]:\n tailPtr.setNext(solution.ListNode(value))\n tailPtr = tailPtr.getNext()\n return headNode\n else:\n return None\n\n def printLinkedList(self, headNode):\n print(self.linkedListToArray(headNode))\n\n def linkedListToArray(self, headNode):\n result = []\n current = headNode\n while current:\n result.append(current.getValue())\n current = current.getNext()\n return result\n\n def checkLinkedListsAreEqual(self, headNodeA, headNodeB):\n valuesA = self.linkedListToArray(headNodeA)\n valuesB = self.linkedListToArray(headNodeB)\n return valuesA == valuesB\n\n def test_example_1(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([2, 4, 3])\n l2 = self.LinkedListFromArray([5, 6, 4])\n expected = [7, 0, 8]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n def test_example_steve7411(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([9])\n l2 = self.LinkedListFromArray([1])\n expected = [0, 1]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n def test_example_2(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([0])\n l2 = self.LinkedListFromArray([0])\n expected = [0]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n def test_example_2(self):\n sol = solution.Solution()\n l1 = self.LinkedListFromArray([9, 9, 9, 9, 9, 9, 9])\n l2 = self.LinkedListFromArray([9, 9, 9, 9])\n expected = [8, 9, 9, 9, 0, 0, 0, 1]\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)),\n expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/env python3\n\nimport unittest\nimport solution\n\nclass TestMethods(unittest.TestCase):\n def LinkedListFromArray(self, values):\n if len(values) > 0:\n headNode = solution.ListNode(values[0], None)\n tailPtr = headNode\n\n if len(values) > 1:\n for value in values[1:]:\n tailPtr.setNext(solution.ListNode(value))\n tailPtr = tailPtr.getNext()\n\n return headNode\n else:\n return None\n\n def printLinkedList(self, headNode):\n print(self.linkedListToArray(headNode))\n\n def linkedListToArray(self, headNode):\n result = []\n current = headNode\n\n while current:\n result.append(current.getValue())\n current = current.getNext()\n\n return result\n\n def checkLinkedListsAreEqual(self, headNodeA, headNodeB):\n valuesA = self.linkedListToArray(headNodeA)\n valuesB = self.linkedListToArray(headNodeB)\n\n return valuesA == valuesB\n\n def test_example_1(self):\n sol = solution.Solution();\n l1 = self.LinkedListFromArray([2, 4, 3])\n l2 = self.LinkedListFromArray([5, 6, 4])\n\n expected = [7, 0, 8]\n\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)), expected)\n\n def test_example_steve7411(self):\n sol = solution.Solution();\n l1 = self.LinkedListFromArray([9])\n l2 = self.LinkedListFromArray([1])\n\n expected = [0, 1]\n\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)), expected)\n\n def test_example_2(self):\n sol = solution.Solution();\n l1 = self.LinkedListFromArray([0])\n l2 = self.LinkedListFromArray([0])\n\n expected = [0]\n\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)), expected)\n\n def test_example_2(self):\n sol = solution.Solution();\n l1 = self.LinkedListFromArray([9,9,9,9,9,9,9])\n l2 = self.LinkedListFromArray([9,9,9,9])\n\n expected = [8,9,9,9,0,0,0,1]\n\n self.assertEqual(self.linkedListToArray(sol.addTwoNumbers(l1, l2)), expected)\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
6,
8,
9,
10,
12
]
}
|
[6, 8, 9, 10, 12] |
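The original record defines `test_example_2` twice, so the second definition shadows the first and the [0] + [0] case never runs; the cleaned copy above renames the duplicate to `test_example_3`. The tests also assume a `solution` module with accessor-style nodes. A hypothetical implementation that satisfies the interface they exercise (the classic add-two-numbers problem, digits stored least-significant first):

```python
# Hypothetical solution.py matching the interface the tests call.
class ListNode:
    def __init__(self, value, next=None):
        self._value = value
        self._next = next

    def getValue(self):
        return self._value

    def getNext(self):
        return self._next

    def setNext(self, node):
        self._next = node


class Solution:
    def addTwoNumbers(self, l1, l2):
        # digits come least-significant first, so walk both lists with a carry
        dummy = ListNode(0)
        tail, carry = dummy, 0
        while l1 or l2 or carry:
            total = carry
            if l1:
                total += l1.getValue()
                l1 = l1.getNext()
            if l2:
                total += l2.getValue()
                l2 = l2.getNext()
            carry, digit = divmod(total, 10)
            tail.setNext(ListNode(digit))
            tail = tail.getNext()
        return dummy.getNext()
```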
from django.contrib import admin, messages
from django.conf.urls import url
from django.shortcuts import render
from django.contrib.sites.models import Site
from django.http import HttpResponseRedirect, HttpResponse
from website_data.models import *
from website_data.forms import *
import logging

# Get an instance of a logger
logger = logging.getLogger(__name__)

class WebsiteDataAdmin(admin.ModelAdmin):

    # Override get_urls to add new admin views (with auth check and without cache)
    def get_urls(self):
        urls = super(WebsiteDataAdmin, self).get_urls()
        my_urls = [
            # url(r'^edit-site/(?:(?P<site_id>\d+)/)$', self.admin_site.admin_view(self.edit_site)),
            url(r'^create-defaults/$', self.admin_site.admin_view(self.create_defaults)),
        ]

        # return custom URLs with default URLs
        return my_urls + urls

    """
    def edit_site(self, request, site_id):
        ""Function to select a site to edit""

        WebsiteData_obj = WebsiteData()
        Site_obj = Site.objects.get(pk=site_id)

        if request.method == 'POST':
            form = EditTextSiteForm(request.POST)

            if form.is_valid():
                # TODO: save the values of the related keys
                WebsiteData_obj.set_all_keys_about_site(site_id=site_id, post=request.POST)

                # success page pre-filled with the updated data
                messages.add_message(request, messages.SUCCESS, 'Dati salvati con successo.')
                return HttpResponseRedirect('/admin/website_data/websitedata/edit-site/' + str(site_id))  # Redirect after POST
        else:
            form = EditTextSiteForm()  # An unbound form
            # pre-fill the POST with any values already present
            request.POST = WebsiteData_obj.get_all_keys_about_site(site_domain=Site_obj.domain)
            # logger.info("chiavi salvate in db per il sito " + str(site_id) + ": " + str(request.POST))

        context = {
            'form': form,
            'post': request.POST,
            'title': "Modifica informazioni sito: " + str(Site_obj.domain),
            'opts': self.model._meta,
            'app_label': self.model._meta.app_label,
            'has_permission': request.user.is_superuser,
            'site_url': '/',
        }

        return render(request, 'admin/custom_view/edit_site.html', context)
    """

    def create_defaults(self, request):
        """Function to create default keys and themes"""
        ThemeKeys_obj = ThemeKeys()
        ThemeKeys_obj.create_default_keys()

        WebsitePreferenceKeys_obj = WebsitePreferenceKeys()
        WebsitePreferenceKeys_obj.create_default_keys()

        context = {
            'title': "Creazione chiavi e temi di default",
            'opts': self.model._meta,
            'app_label': self.model._meta.app_label,
            'has_permission': request.user.is_superuser,
            'site_url': '/',
        }

        messages.add_message(request, messages.SUCCESS, 'Valori di default creati con successo.')

        return render(request, 'admin/custom_view/create_defaults.html', context)

    def get_model_perms(self, request):
        """
        https://stackoverflow.com/questions/2431727/django-admin-hide-a-model

        Return empty perms dict thus hiding the model from admin index.
        This keeps the custom views of the website_data app usable while hiding
        every model: the URLs still work, but the admin index shows no model
        to add or change.
        """
        return {}

class CustomSiteInstanceInline(admin.StackedInline):
    model = CustomSites

class WebsitePreferencesInstanceInline(admin.TabularInline):
    model = WebsitePreferences

# Define a new Site admin
class SiteAdmin(admin.ModelAdmin):
    list_filter = ('domain', 'name')
    inlines = [CustomSiteInstanceInline, WebsitePreferencesInstanceInline]

# TODO: extra admin pages (to be used only for debugging or maintenance)
"""
admin.site.register(Themes)
admin.site.register(ThemeKeys)
admin.site.register(WebsitePreferences)
admin.site.register(WebsitePreferenceKeys)
admin.site.register(CustomSites)
"""
admin.site.unregister(Site)
admin.site.register(Site, SiteAdmin)
admin.site.register(WebsiteData, WebsiteDataAdmin)
|
normal
|
{
"blob_id": "614d6484678890df2ae0f750a3cad51a2b9bd1c6",
"index": 2315,
"step-1": "<mask token>\n\n\nclass WebsitePreferencesInstanceInline(admin.TabularInline):\n model = WebsitePreferences\n\n\nclass SiteAdmin(admin.ModelAdmin):\n list_filter = 'domain', 'name'\n inlines = [CustomSiteInstanceInline, WebsitePreferencesInstanceInline]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass WebsiteDataAdmin(admin.ModelAdmin):\n\n def get_urls(self):\n urls = super(WebsiteDataAdmin, self).get_urls()\n my_urls = [url('^create-defaults/$', self.admin_site.admin_view(\n self.create_defaults))]\n return my_urls + urls\n <mask token>\n\n def create_defaults(self, request):\n \"\"\"Function to create default keys and themes\"\"\"\n ThemeKeys_obj = ThemeKeys()\n ThemeKeys_obj.create_default_keys()\n WebsitePreferenceKeys_obj = WebsitePreferenceKeys()\n WebsitePreferenceKeys_obj.create_default_keys()\n context = {'title': 'Creazione chiavi e temi di default', 'opts':\n self.model._meta, 'app_label': self.model._meta.app_label,\n 'has_permission': request.user.is_superuser, 'site_url': '/'}\n messages.add_message(request, messages.SUCCESS,\n 'Valori di default creati con successo.')\n return render(request, 'admin/custom_view/create_defaults.html',\n context)\n\n def get_model_perms(self, request):\n \"\"\"\n https://stackoverflow.com/questions/2431727/django-admin-hide-a-model\n\n Return empty perms dict thus hiding the model from admin index.\n Per far funzionare le custom view dell'app website_data ma nascondendo\n tutti i modelli, in questo modo gli url funzionano ma nell'admin non si\n vede nessun modello da modificare/aggiungere.\n \"\"\"\n return {}\n\n\nclass CustomSiteInstanceInline(admin.StackedInline):\n model = CustomSites\n\n\nclass WebsitePreferencesInstanceInline(admin.TabularInline):\n model = WebsitePreferences\n\n\nclass SiteAdmin(admin.ModelAdmin):\n list_filter = 'domain', 'name'\n inlines = [CustomSiteInstanceInline, WebsitePreferencesInstanceInline]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass WebsiteDataAdmin(admin.ModelAdmin):\n\n def get_urls(self):\n urls = super(WebsiteDataAdmin, self).get_urls()\n my_urls = [url('^create-defaults/$', self.admin_site.admin_view(\n self.create_defaults))]\n return my_urls + urls\n \"\"\"\n def edit_site(self, request, site_id):\n \"\"Function to select a site to edit\"\"\n\n WebsiteData_obj = WebsiteData()\n Site_obj = Site.objects.get(pk=site_id)\n\n if request.method == 'POST':\n form = EditTextSiteForm(request.POST)\n\n if form.is_valid():\n # TODO: salvo i valori delle relative chiavi\n WebsiteData_obj.set_all_keys_about_site(site_id=site_id, post=request.POST)\n\n # pagina di successo con i dati aggiornati precompilati\n messages.add_message(request, messages.SUCCESS, 'Dati salvati con successo.')\n return HttpResponseRedirect('/admin/website_data/websitedata/edit-site/' + str(site_id)) # Redirect after POST\n else:\n form = EditTextSiteForm() # An unbound form\n # precompilo la post con eventuali valori presenti\n request.POST = WebsiteData_obj.get_all_keys_about_site(site_domain=Site_obj.domain)\n # logger.info(\"chiavi salvate in db per il sito \" + str(site_id) + \": \" + str(request.POST))\n\n context = {\n 'form' : form,\n 'post': request.POST,\n 'title': \"Modifica informazioni sito: \" + str(Site_obj.domain),\n 'opts': self.model._meta,\n 'app_label': self.model._meta.app_label,\n 'has_permission': request.user.is_superuser,\n 'site_url': '/',\n }\n\n return render(request, 'admin/custom_view/edit_site.html', context)\n \"\"\"\n\n def create_defaults(self, request):\n \"\"\"Function to create default keys and themes\"\"\"\n ThemeKeys_obj = ThemeKeys()\n ThemeKeys_obj.create_default_keys()\n WebsitePreferenceKeys_obj = WebsitePreferenceKeys()\n WebsitePreferenceKeys_obj.create_default_keys()\n context = {'title': 'Creazione chiavi e temi di default', 'opts':\n self.model._meta, 'app_label': self.model._meta.app_label,\n 'has_permission': request.user.is_superuser, 'site_url': '/'}\n messages.add_message(request, messages.SUCCESS,\n 'Valori di default creati con successo.')\n return render(request, 'admin/custom_view/create_defaults.html',\n context)\n\n def get_model_perms(self, request):\n \"\"\"\n https://stackoverflow.com/questions/2431727/django-admin-hide-a-model\n\n Return empty perms dict thus hiding the model from admin index.\n Per far funzionare le custom view dell'app website_data ma nascondendo\n tutti i modelli, in questo modo gli url funzionano ma nell'admin non si\n vede nessun modello da modificare/aggiungere.\n \"\"\"\n return {}\n\n\nclass CustomSiteInstanceInline(admin.StackedInline):\n model = CustomSites\n\n\nclass WebsitePreferencesInstanceInline(admin.TabularInline):\n model = WebsitePreferences\n\n\nclass SiteAdmin(admin.ModelAdmin):\n list_filter = 'domain', 'name'\n inlines = [CustomSiteInstanceInline, WebsitePreferencesInstanceInline]\n\n\n<mask token>\nadmin.site.unregister(Site)\nadmin.site.register(Site, SiteAdmin)\nadmin.site.register(WebsiteData, WebsiteDataAdmin)\n",
"step-4": "from django.contrib import admin, messages\nfrom django.conf.urls import url\nfrom django.shortcuts import render\nfrom django.contrib.sites.models import Site\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom website_data.models import *\nfrom website_data.forms import *\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass WebsiteDataAdmin(admin.ModelAdmin):\n\n def get_urls(self):\n urls = super(WebsiteDataAdmin, self).get_urls()\n my_urls = [url('^create-defaults/$', self.admin_site.admin_view(\n self.create_defaults))]\n return my_urls + urls\n \"\"\"\n def edit_site(self, request, site_id):\n \"\"Function to select a site to edit\"\"\n\n WebsiteData_obj = WebsiteData()\n Site_obj = Site.objects.get(pk=site_id)\n\n if request.method == 'POST':\n form = EditTextSiteForm(request.POST)\n\n if form.is_valid():\n # TODO: salvo i valori delle relative chiavi\n WebsiteData_obj.set_all_keys_about_site(site_id=site_id, post=request.POST)\n\n # pagina di successo con i dati aggiornati precompilati\n messages.add_message(request, messages.SUCCESS, 'Dati salvati con successo.')\n return HttpResponseRedirect('/admin/website_data/websitedata/edit-site/' + str(site_id)) # Redirect after POST\n else:\n form = EditTextSiteForm() # An unbound form\n # precompilo la post con eventuali valori presenti\n request.POST = WebsiteData_obj.get_all_keys_about_site(site_domain=Site_obj.domain)\n # logger.info(\"chiavi salvate in db per il sito \" + str(site_id) + \": \" + str(request.POST))\n\n context = {\n 'form' : form,\n 'post': request.POST,\n 'title': \"Modifica informazioni sito: \" + str(Site_obj.domain),\n 'opts': self.model._meta,\n 'app_label': self.model._meta.app_label,\n 'has_permission': request.user.is_superuser,\n 'site_url': '/',\n }\n\n return render(request, 'admin/custom_view/edit_site.html', context)\n \"\"\"\n\n def create_defaults(self, request):\n \"\"\"Function to create default keys and themes\"\"\"\n ThemeKeys_obj = ThemeKeys()\n ThemeKeys_obj.create_default_keys()\n WebsitePreferenceKeys_obj = WebsitePreferenceKeys()\n WebsitePreferenceKeys_obj.create_default_keys()\n context = {'title': 'Creazione chiavi e temi di default', 'opts':\n self.model._meta, 'app_label': self.model._meta.app_label,\n 'has_permission': request.user.is_superuser, 'site_url': '/'}\n messages.add_message(request, messages.SUCCESS,\n 'Valori di default creati con successo.')\n return render(request, 'admin/custom_view/create_defaults.html',\n context)\n\n def get_model_perms(self, request):\n \"\"\"\n https://stackoverflow.com/questions/2431727/django-admin-hide-a-model\n\n Return empty perms dict thus hiding the model from admin index.\n Per far funzionare le custom view dell'app website_data ma nascondendo\n tutti i modelli, in questo modo gli url funzionano ma nell'admin non si\n vede nessun modello da modificare/aggiungere.\n \"\"\"\n return {}\n\n\nclass CustomSiteInstanceInline(admin.StackedInline):\n model = CustomSites\n\n\nclass WebsitePreferencesInstanceInline(admin.TabularInline):\n model = WebsitePreferences\n\n\nclass SiteAdmin(admin.ModelAdmin):\n list_filter = 'domain', 'name'\n inlines = [CustomSiteInstanceInline, WebsitePreferencesInstanceInline]\n\n\n<mask token>\nadmin.site.unregister(Site)\nadmin.site.register(Site, SiteAdmin)\nadmin.site.register(WebsiteData, WebsiteDataAdmin)\n",
"step-5": "from django.contrib import admin, messages\nfrom django.conf.urls import url\nfrom django.shortcuts import render\nfrom django.contrib.sites.models import Site\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom website_data.models import *\nfrom website_data.forms import *\nimport logging\n\n# Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\nclass WebsiteDataAdmin(admin.ModelAdmin):\n\n # URLs overwriting to add new admin views (with auth check and without cache)\n def get_urls(self):\n urls = super(WebsiteDataAdmin, self).get_urls()\n my_urls = [\n # url(r'^edit-site/(?:(?P<site_id>\\d+)/)$', self.admin_site.admin_view(self.edit_site)),\n url(r'^create-defaults/$', self.admin_site.admin_view(self.create_defaults)),\n ]\n\n # return custom URLs with default URLs\n return my_urls + urls\n\n \"\"\"\n def edit_site(self, request, site_id):\n \"\"Function to select a site to edit\"\"\n\n WebsiteData_obj = WebsiteData()\n Site_obj = Site.objects.get(pk=site_id)\n\n if request.method == 'POST':\n form = EditTextSiteForm(request.POST)\n\n if form.is_valid():\n # TODO: salvo i valori delle relative chiavi\n WebsiteData_obj.set_all_keys_about_site(site_id=site_id, post=request.POST)\n\n # pagina di successo con i dati aggiornati precompilati\n messages.add_message(request, messages.SUCCESS, 'Dati salvati con successo.')\n return HttpResponseRedirect('/admin/website_data/websitedata/edit-site/' + str(site_id)) # Redirect after POST\n else:\n form = EditTextSiteForm() # An unbound form\n # precompilo la post con eventuali valori presenti\n request.POST = WebsiteData_obj.get_all_keys_about_site(site_domain=Site_obj.domain)\n # logger.info(\"chiavi salvate in db per il sito \" + str(site_id) + \": \" + str(request.POST))\n\n context = {\n 'form' : form,\n 'post': request.POST,\n 'title': \"Modifica informazioni sito: \" + str(Site_obj.domain),\n 'opts': self.model._meta,\n 'app_label': self.model._meta.app_label,\n 'has_permission': request.user.is_superuser,\n 'site_url': '/',\n }\n\n return render(request, 'admin/custom_view/edit_site.html', context)\n \"\"\"\n\n def create_defaults(self, request):\n \"\"\"Function to create default keys and themes\"\"\"\n ThemeKeys_obj = ThemeKeys()\n ThemeKeys_obj.create_default_keys()\n\n WebsitePreferenceKeys_obj = WebsitePreferenceKeys()\n WebsitePreferenceKeys_obj.create_default_keys()\n\n context = {\n 'title': \"Creazione chiavi e temi di default\",\n 'opts': self.model._meta,\n 'app_label': self.model._meta.app_label,\n 'has_permission': request.user.is_superuser,\n 'site_url': '/',\n }\n\n messages.add_message(request, messages.SUCCESS, 'Valori di default creati con successo.')\n\n return render(request, 'admin/custom_view/create_defaults.html', context)\n\n def get_model_perms(self, request):\n \"\"\"\n https://stackoverflow.com/questions/2431727/django-admin-hide-a-model\n\n Return empty perms dict thus hiding the model from admin index.\n Per far funzionare le custom view dell'app website_data ma nascondendo\n tutti i modelli, in questo modo gli url funzionano ma nell'admin non si\n vede nessun modello da modificare/aggiungere.\n \"\"\"\n return {}\n\nclass CustomSiteInstanceInline(admin.StackedInline):\n model = CustomSites\n\nclass WebsitePreferencesInstanceInline(admin.TabularInline):\n model = WebsitePreferences\n\n# Define a new Site admin\nclass SiteAdmin(admin.ModelAdmin):\n list_filter = ('domain', 'name')\n inlines = [CustomSiteInstanceInline, WebsitePreferencesInstanceInline]\n\n# TODO: pagine 
aggiuntive per l'admin (da usare solo per debug o manutenzione)\n\"\"\"\nadmin.site.register(Themes)\nadmin.site.register(ThemeKeys)\nadmin.site.register(WebsitePreferences)\nadmin.site.register(WebsitePreferenceKeys)\nadmin.site.register(CustomSites)\n\"\"\"\n\nadmin.site.unregister(Site)\nadmin.site.register(Site, SiteAdmin)\nadmin.site.register(WebsiteData, WebsiteDataAdmin)\n",
"step-ids": [
4,
10,
12,
14,
15
]
}
|
[
4,
10,
12,
14,
15
] |
class A():
    def m(self):
        print("Class A")

class B():
    def m(self):
        print("Class B")

class C(B, A):
    print("class C")

obj1 = C()

obj1.m()

print(C.mro()) # Method Resolution Order: C3 linearization, ending at the common "object" base class
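# Expected output (the class body runs once at definition time; with C(B, A),
# the MRO [C, B, A, object] makes B.m() win):
#   class C
#   Class B
#   [<class '__main__.C'>, <class '__main__.B'>, <class '__main__.A'>, <class 'object'>]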
|
normal
|
{
"blob_id": "3d59b8d6a34935ff332028443276f161430a981c",
"index": 9687,
"step-1": "<mask token>\n\n\nclass B:\n <mask token>\n\n\nclass C(B, A):\n print('class C')\n\n\n<mask token>\n",
"step-2": "class A:\n <mask token>\n\n\nclass B:\n\n def m(self):\n print('Class B')\n\n\nclass C(B, A):\n print('class C')\n\n\n<mask token>\n",
"step-3": "class A:\n\n def m(self):\n print('Class A')\n\n\nclass B:\n\n def m(self):\n print('Class B')\n\n\nclass C(B, A):\n print('class C')\n\n\n<mask token>\nobj1.m()\nprint(C.mro())\n",
"step-4": "class A:\n\n def m(self):\n print('Class A')\n\n\nclass B:\n\n def m(self):\n print('Class B')\n\n\nclass C(B, A):\n print('class C')\n\n\nobj1 = C()\nobj1.m()\nprint(C.mro())\n",
"step-5": "class A():\n def m(self):\n print(\"Class A\")\n\nclass B():\n def m(self):\n print(\"Class B\")\n\nclass C(B, A):\n print(\"class C\")\n\nobj1 = C()\n\nobj1.m()\n\nprint(C.mro()) # Method Resolution Order based on convention of \"OBJECT\" super class\n",
"step-ids": [
2,
4,
6,
7,
8
]
}
|
[
2,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
class NetworkDevice:
<|reserved_special_token_0|>
def __init__(self, **kwargs):
log.info('__init__')
self.ip = ''
self.username = ''
self.password = ''
self.device_type = ''
self.port = 22
self.timeout = 10
self._protocol = 'ssh'
self.enable_mode = False
self.enable_password = ''
self.conn = None
self._writer = None
self._reader = None
self.possible_prompts = []
self._connect_first_ending_prompt = ['#', '>']
self.list_of_possible_ending_prompts = ['(config-line)#',
'(config-if)#', '(config)#', '>', '#']
self._carriage_return_for_send_command = '\n'
self._send_command_error_in_returned_output = []
self._telnet_connect_login = 'Username:'
self._telnet_connect_password = 'Password:'
self._telnet_connect_authentication_fail_prompt = [':', '%']
self.cmd_enable = 'enable'
self.cmd_disable_paging = 'terminal length 0'
self.cmd_enter_config_mode = 'configure terminal'
self.cmd_exit_config_mode = 'exit'
self.cmd_get_version = 'show version'
self.cmd_get_hostname = 'show version | include uptime'
self.cmd_get_model = 'show inventory'
self.cmd_get_serial_number = 'show inventory | i SN'
self.cmd_get_config = 'show running-config'
self.cmd_save_config = 'write memory'
self.cmd_get_interfaces = [
'interface ethernet print terse without-paging',
'foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}'
, 'interface bridge port print terse without-paging']
self.cmd_set_interface = ['interface ethernet enable <INTERFACE>',
'interface ethernet disable <INTERFACE>',
'interface ethernet comment <INTERFACE> "<COMMENT>"',
'interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>',
'interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]'
]
self.cmd_get_mac_address_table = (
'interface bridge host print without-paging')
self.cmd_get_arp = 'ip arp print terse without-paging'
self.cmd_get_lldp_neighbors = 'ip neighbor print terse without-paging'
self.cmd_get_vlans = 'interface bridge vlan print terse without-paging'
self.cmd_add_vlan = (
'interface bridge vlan add vlan-ids=<VLAN> comment="<VLAN_NAME>" bridge=<BRIDGE>'
)
self.cmd_remove_vlan = (
'interface bridge vlan remove [find vlan-ids=<VLAN>]')
self.cmd_add_interface_to_vlan = ['interface bridge vlan print terse',
'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'
,
'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'
,
'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'
]
self.cmd_remove_interface_from_vlan = [
'interface bridge vlan print terse',
'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'
,
'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'
,
'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'
]
self.cmd_get_routing_table = 'ip route print without-paging terse'
self.cmd_get_interfaces_ip = 'ip address print terse without-paging'
self.cmd_add_static_route = (
'ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>'
)
self.cmd_remove_static_route = (
'ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]')
log.debug('__init__: kwargs: ' + str(kwargs))
if 'ip' in kwargs:
self.ip = kwargs['ip']
log.info('__init__: ip found: ' + str(self.ip))
if 'username' in kwargs:
self.username = kwargs['username']
log.info('__init__: username found: ' + str(self.username))
if 'password' in kwargs:
self.password = kwargs['password']
log.debug('__init__: password found: ' + str(self.password))
if 'device_type' in kwargs:
self.device_type = kwargs['device_type']
log.info('__init__: device_type found: ' + str(self.device_type))
if 'timeout' in kwargs:
self.timeout = kwargs['timeout']
log.info('__init__: timeout found: ' + str(self.timeout))
if 'protocol' in kwargs:
self._protocol = kwargs['protocol'].lower()
log.info('__init__: protocol found: ' + str(self._protocol))
if self._protocol.lower() == 'telnet':
self.port = 23
if 'port' in kwargs:
self.port = kwargs['port']
log.info('__init__: port found: ' + str(self.port))
if 'enable_mode' in kwargs:
self.enable_mode = kwargs['enable_mode']
log.info('__init__: enable_mode found: ' + str(self.enable_mode))
if 'enable_password' in kwargs:
self.enable_password = kwargs['enable_password']
log.info('__init__: enable_password found: ' + str(self.
enable_password))
async def __aenter__(self):
"""
Context manager opening connection
"""
try:
await self.connect()
except Exception:
await self.disconnect()
raise
return self
async def __aexit__(self, exc_type, exc_value, traceback):
"""
Context manager closing connection
"""
await self.disconnect()
def find_prompt(self, text):
"""
Method used to find a prompt inside an output string
This method is used during the first communication with the device.
        First it finds the prompt, then calculates the different forms the prompt
        can take. This is useful later on when finding the prompt in other
        output streams (read).
:param text: data with a prompt
:type text: str
:return: the prompt found
:rtype: str
"""
        prompt = text.split('\n')[-1]
        prompt = prompt.split('\r')[-1]
log.info(f"find_prompt: prompt: '{prompt}'")
self.possible_prompts = self.get_possible_prompts(prompt)
return prompt
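    # Illustrative (assumed raw device output):
    #   find_prompt('...\r\nswitch#') -> 'switch#'
    # and self.possible_prompts is filled with the known variants of that prompt.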
def get_possible_prompts(self, prompt):
"""
        Method used to check if a prompt has one of the expected endings, then
        create a list with all possible prompts for the device
:param prompt: a prompt with a possible ending prompt (eg. "switch#")
:type prompt: str
:return: the list of prompts
:rtype: list
"""
list_of_prompts = []
list_of_possible_ending_prompts = self.list_of_possible_ending_prompts
my_prompt = prompt
for ending in list_of_possible_ending_prompts:
if my_prompt.endswith(ending):
my_prompt = my_prompt[:-len(ending)]
break
log.info(f"get_possible_prompts: prompt found: '{my_prompt}'")
log.info(f"get_possible_prompts: prompt found size: '{len(my_prompt)}'"
)
for ending in list_of_possible_ending_prompts:
list_of_prompts.append(my_prompt + ending)
log.info(
f'get_possible_prompts: list of possible prompts: {list_of_prompts}'
)
return list_of_prompts
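    # Illustrative: for the prompt 'switch#' this returns
    # ['switch(config-line)#', 'switch(config-if)#', 'switch(config)#', 'switch>', 'switch#']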
def check_if_prompt_is_found(self, text):
"""
Method used to check if a prompt is detected inside a string
:param text: a string with prompt
:type text: str
        :return: True if a prompt is found, False otherwise
        :rtype: bool
"""
prompt_found = False
for prompt in self.possible_prompts:
log.info(f"check_if_prompt_is_found: prompt: '{prompt}'")
if prompt in text:
prompt_found = True
log.info(f"check_if_prompt_is_found: prompt found: '{prompt}'")
break
return prompt_found
def remove_command_in_output(self, text, cmd):
"""
Method removing the command at the beginning of a string
After sending commands an "echo" of the command sent
        is displayed in the output string. This method removes it.
:param text: the text with the command at the beginning
:type text: str
:param cmd: the command previously sent
:type cmd: str
:return: the output string without the command
:rtype: str
"""
log.info(f"remove_command_in_output: cmd = '{cmd}'")
log.info(
f"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'")
output = text.split(cmd + '\n')[-1]
log.info(f"remove_command_in_output: output = '{output}'")
return output
def remove_starting_carriage_return_in_output(self, text):
"""
Method removing the carriage return at the beginning of a string
:param text: the text with the command at the beginning
:type text: str
:return: the output string without the starting carriage return
:rtype: str
"""
log.info('remove_starting_carriage_return_in_output')
output = text.lstrip('\r\n\r')
log.info(
f"remove_starting_carriage_return_in_output: output = '{output}'")
return output
<|reserved_special_token_0|>
def check_error_output(self, output):
"""
Check if an error is returned by the device ("% Unrecognized command", "% Ambiguous command", etc.)
If an error is found, then an exception is raised
"""
log.info('check_error_output')
if output:
log.info('check_error_output: output has some data')
for element in self._send_command_error_in_returned_output:
log.info(f'check_error_output: element: {element}')
log.info(f'check_error_output: output[0]: {output[0]}')
if output.startswith(element):
raise Exception(output)
def remove_ansi_escape_sequence(self, text):
"""
        Method removing ANSI escape sequences from a string
        Just CSI sequences are removed
        :param text: the text containing ANSI escape sequences
        :type text: str
        :return: the text without CSI sequences
:rtype: str
"""
output = ''
esc_found = 0
for i in text:
if esc_found == 0:
if i == '\x1b':
log.info('Esc!')
esc_found = 1
else:
output += i
elif esc_found == 1:
if i == '[':
log.info('CSI sequence')
esc_found = 2
else:
output += '\x1b' + i
esc_found = 0
elif i >= 'a' and i <= 'z' or i >= 'A' and i <= 'Z':
log.info('End of escape sequence')
esc_found = 0
return output
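    # Illustrative: a colored prompt such as '\x1b[32mswitch#\x1b[0m'
    # comes back as plain 'switch#'.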
async def disable_paging(self):
"""
Async method disabling paging on a device
Use the "cmd_disable_paging" attribute
"""
log.info('disable_paging')
await self.send_command(self.cmd_disable_paging)
async def connect(self):
"""
Async method used for connecting a device
Currently supported: SSH and Telnet
"""
log.info('connect')
try:
if self._protocol == 'ssh':
await self.connectSSH()
elif self._protocol == 'telnet':
await self.connectTelnet()
else:
raise Exception(
f'connect: unsupported protocol: {self._protocol}')
except Exception:
log.info('connect: connection error')
raise
async def connectSSH(self):
"""
Async method used for connecting a device using SSH protocol
"""
log.info('connectSSH')
generator = asyncssh.connect(self.ip, username=self.username,
password=self.password, known_hosts=None, encryption_algs=[algs
.decode('utf-8') for algs in asyncssh.encryption._enc_algs])
try:
self.conn = await asyncio.wait_for(generator, timeout=self.timeout)
except asyncio.exceptions.TimeoutError as error:
log.error(
f"connectSSH: connection failed: {self.ip} timeout: '{error}'")
raise asyncio.exceptions.TimeoutError(
'Connection failed: connection timed out.')
except Exception as error:
log.error(f"connectSSH: connection failed: {self.ip} '{error}'")
raise
log.info('connectSSH: connection success')
self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type
='netscud')
log.info('connectSSH: open_session success')
data = ''
prompt_not_found = True
try:
while prompt_not_found:
log.info('connectSSH: beginning of the loop')
data += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=self.timeout)
log.info(f"connectSSH: data: '{str(data)}'")
log.info(
f"connectSSH: data: hex:'{data.encode('utf-8').hex()}'")
for prompt in self._connect_first_ending_prompt:
if data.endswith(prompt):
log.info(
f"connectSSH: first ending prompt found: '{prompt}'"
)
prompt_not_found = False
break
log.info('connectSSH: end of loop')
except Exception as error:
log.error(
f"connectSSH: timeout while reading the prompt: {self.ip} '{error}'"
)
raise
log.info(f'connectSSH: end of prompt loop')
data = self.remove_ansi_escape_sequence(data)
self.prompt = self.find_prompt(str(data))
log.info(f"connectSSH: prompt found: '{self.prompt}'")
log.info(f"connectSSH: prompt found size: '{len(self.prompt)}'")
if self.cmd_disable_paging:
await self.disable_paging()
async def connectTelnet(self):
"""
Async method used for connecting a device using Telnet protocol
"""
log.info('connectTelnet')
try:
conn = asyncio.open_connection(self.ip, self.port)
except Exception as error:
log.error(
f"connectTelnet: preparation to the connection failed: '{error}'"
)
raise
log.info('connectTelnet: preparation to the connection success')
try:
self._reader, self._writer = await asyncio.wait_for(conn,
timeout=self.timeout)
except asyncio.TimeoutError:
log.error('connectTelnet: connection: timeout')
raise
log.info('connectTelnet: connection success')
prompt = self._telnet_connect_login
prompt_password = self._telnet_connect_password
use_login = True
output = ''
byte_data = b''
while True:
log.info(f'connectTelnet: read data for prompt')
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=self.timeout)
log.info(f'connectTelnet: byte_data: {byte_data}')
output = str(byte_data)
log.info(f'connectTelnet: output: {output}')
if prompt in output:
break
elif prompt_password in output:
use_login = False
break
log.info(f"connectTelnet: login prompt: '{output}'")
if use_login:
log.info('connectTelnet: sending login')
try:
await self.send_command(self.username, prompt_password)
log.info('connectTelnet: login sent')
except Exception:
raise
log.info('connectTelnet: sending password')
try:
output = await self.telnet_send_command_with_unexpected_pattern(
self.password, self._connect_first_ending_prompt, self.
_telnet_connect_authentication_fail_prompt)
except Exception:
raise
log.info('connectTelnet: password sent')
self.prompt = self.find_prompt(str(output))
log.info(f"connectTelnet: prompt found: '{self.prompt}'")
if self.enable_mode:
log.info('connectTelnet: enable mode to be activated')
try:
await self.send_command(self.cmd_enable, prompt_password)
log.info('connectTelnet: enable command sent')
log.info('connectTelnet: sending enable password')
await self.telnet_send_command_with_unexpected_pattern(self
.enable_password, self._connect_first_ending_prompt,
self._telnet_connect_authentication_fail_prompt)
log.info('connectTelnet: enable password sent')
except Exception:
log.info('connectTelnet: enable password failure')
raise
if self.cmd_disable_paging:
await self.disable_paging()
async def disconnect(self):
"""
Async method used to disconnect a device
If this method is not used then exceptions will happen
when the program will end
"""
log.info('disconnect')
if self._protocol == 'ssh':
await self.disconnectSSH()
elif self._protocol == 'telnet':
await self.disconnectTelnet()
else:
raise Exception(f'Unsupported protocol: {self._protocol}')
async def disconnectSSH(self):
"""
Async method used to disconnect a device in SSH
If this method is not used then exceptions will happen
when the program will end
"""
log.info('disconnectSSH')
if self.conn:
self.conn.close()
self.conn = None
async def disconnectTelnet(self):
"""
Async method used to disconnect a device in Telnet
If this method is not used then exceptions will happen
when the program will end
"""
log.info('disconnectTelnet')
if self._writer:
self._writer.close()
self._writer = None
async def send_command(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the output of command
:rtype: str
"""
log.info('send_command')
if timeout is None:
timeout = self.timeout
if self._protocol == 'ssh':
output = await self.send_commandSSH(cmd, pattern=pattern,
timeout=timeout)
elif self._protocol == 'telnet':
output = await self.send_commandTelnet(cmd, pattern=pattern,
timeout=timeout)
else:
raise Exception(
f'send_command: unsupported protocol: {self._protocol}')
return output
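    # Typical call from an async context, assuming a connected device:
    #   output = await device.send_command('show version')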
async def send_commandSSH(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the output of command
:rtype: str
"""
log.info('send_commandSSH')
if timeout is None:
timeout = self.timeout
log.info(f"send_commandSSH: cmd = '{cmd}'")
self.stdinx.write(cmd + self._carriage_return_for_send_command)
log.info('send_commandSSH: command sent')
output = ''
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
output = self.remove_ansi_escape_sequence(output)
output = output.replace('\r', '')
log.info(f"send_commandSSH: output: '{output}'")
if pattern:
if pattern in output:
break
elif self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_commandSSH: raw output: '{output}'
send_commandSSH: raw output (hex): '{output.encode().hex()}'"""
)
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_commandSSH: cleaned output: '{output}'
send_commandSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return output
async def send_commandTelnet(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the output of command
:rtype: str
"""
log.info('send_commandTelnet')
if timeout is None:
timeout = self.timeout
cmd = cmd + '\n'
self._writer.write(cmd.encode())
output = ''
byte_data = b''
try:
while True:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_commandTelnet: byte_data: '{byte_data}'")
output = str(byte_data)
log.info(f"send_commandTelnet: output: '{output}'")
if pattern:
if pattern in output:
break
elif self.check_if_prompt_is_found(output):
break
except asyncio.TimeoutError:
log.error('send_commandTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_commandTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_commandTelnet: raw output: '{output}'
send_commandTelnet: raw output (hex): '{output.encode().hex()}'"""
)
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_commandTelnet: cleaned output: '{output}'
send_commandTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return output
async def telnet_send_command_with_unexpected_pattern(self, cmd,
pattern, error_pattern=None, timeout=None):
"""
Async method used to send command for Telnet connection to a device with possible unexpected patterns
        send_command can wait until timeout if the login and password are wrong. This method
        speeds up the returned error message when an authentication failure is identified.
        This method is limited to authentication when a password is required
:param cmd: command to send
:type cmd: str
        :param pattern: optional, a list of patterns located at the very end of a returned string. Can be used
                        to define a custom or unexpected prompt at the end of a string
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:param error_pattern: optional, a list of failed prompts found when the login and password are not correct
:type error_pattern: str
:return: the output of command
:rtype: str
"""
log.info('telnet_send_command_with_unexpected_pattern')
if timeout is None:
timeout = self.timeout
cmd = cmd + self._carriage_return_for_send_command
self._writer.write(cmd.encode())
output = ''
byte_data = b''
pattern_not_found = True
try:
while pattern_not_found:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(
f"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'"
)
log.debug(
f"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'"
)
output = str(byte_data)
log.info(
f"telnet_send_command_with_unexpected_pattern: output: '{output}'"
)
if pattern:
for prompt in pattern:
log.info(
f"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'"
)
if prompt in output:
pattern_not_found = False
log.info(
f"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'"
)
break
if error_pattern and pattern_not_found:
for bad_prompt in error_pattern:
log.info(
f"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'"
)
if bad_prompt in output:
log.error(
'telnet_send_command_with_unexpected_pattern: authentication failed'
)
raise Exception(
'telnet_send_command_with_unexpected_pattern: authentication failed'
)
except asyncio.TimeoutError:
await self.disconnect()
log.error(
'telnet_send_command_with_unexpected_pattern: reading prompt: timeout'
)
raise
except Exception as error:
await self.disconnect()
log.error(
f'telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}'
)
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""telnet_send_command_with_unexpected_pattern: raw output: '{output}'
telnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'"""
)
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'
telnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'"""
)
return output
async def send_config_set(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
        The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the results of the commands sent
:rtype: list of str
"""
log.info('send_config_set')
if timeout is None:
timeout = self.timeout
if self._protocol == 'ssh':
output = await self.send_config_setSSH(cmds, timeout)
elif self._protocol == 'telnet':
output = await self.send_config_setTelnet(cmds, timeout)
else:
raise Exception(
f'send_config_set: unsupported protocol: {self._protocol}')
return output
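    # Typical call from an async context, assuming a connected device
    # (the commands shown are illustrative):
    #   output = await device.send_config_set(['vlan 10', 'name users'])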
async def send_config_setSSH(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
        The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the results of the commands sent
:rtype: list of str
"""
log.info('send_config_setSSH')
if timeout is None:
timeout = self.timeout
returned_output = ''
if isinstance(cmds, str):
cmds = [cmds]
elif not isinstance(cmds, list):
log.error(
'send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list'
)
return returned_output
log.info('send_config_set: entering configuration mode')
output = ''
cmd = self.cmd_enter_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setSSH: cmd = '{cmd}'")
self.stdinx.write(cmd)
log.info('send_config_setSSH: configuration mode entered')
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_config_setSSH: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_config_setSSH: raw output: '{output}'
send_config_setSSH: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setSSH: cleaned output: '{output}'
send_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setSSH: sending commands')
output = ''
for cmd in cmds:
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setSSH: cmd = '{cmd}'")
self.stdinx.write(cmd)
log.info('send_config_setSSH: command sent')
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_config_setSSH: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_config_setSSH: raw output: '{output}'
send_config_setSSH: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setSSH: cleaned output: '{output}'
send_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setSSH: leaving configuration mode')
output = ''
cmd = self.cmd_exit_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setSSH: cmd = '{cmd}'")
self.stdinx.write(cmd)
log.info('send_config_setSSH: command to leave configuration mode sent'
)
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_config_setSSH: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_config_setSSH: raw output: '{output}'
send_config_setSSH: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setSSH: cleaned output: '{output}'
send_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return returned_output
async def send_config_setTelnet(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
        The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the results of the commands sent
:rtype: list of str
"""
log.info('send_config_setTelnet')
if timeout is None:
timeout = self.timeout
returned_output = ''
if isinstance(cmds, str):
cmds = [cmds]
elif not isinstance(cmds, list):
log.error(
                'send_config_setTelnet: parameter cmds used in send_config_set is neither a string nor a list'
)
return returned_output
log.info('send_config_setTelnet: entering configuration mode')
output = ''
cmd = self.cmd_enter_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
self._writer.write(cmd.encode())
log.info('send_config_setTelnet: configuration mode entered')
output = ''
byte_data = b''
try:
while True:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
output = str(byte_data)
log.info(f"send_config_setTelnet: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
except asyncio.TimeoutError:
log.error('send_config_setTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_config_setTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_config_setTelnet: raw output: '{output}'
send_config_setTelnet: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setTelnet: cleaned output: '{output}'
send_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setTelnet: sending commands')
output = ''
for cmd in cmds:
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
self._writer.write(cmd.encode())
log.info('send_config_setTelnet: command sent')
output = ''
byte_data = b''
try:
while True:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
output = str(byte_data)
log.info(f"send_config_setTelnet: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
except asyncio.TimeoutError:
log.error('send_config_setTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_config_setTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_config_setTelnet: raw output: '{output}'
send_config_setTelnet: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setTelnet: cleaned output: '{output}'
send_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setTelnet: leaving configuration mode')
output = ''
cmd = self.cmd_exit_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
self._writer.write(cmd.encode())
log.info(
'send_config_setTelnet: command to leave configuration mode sent')
output = ''
byte_data = b''
loop = 3
try:
while loop:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
output = str(byte_data)
log.info(f"send_config_setTelnet: output: '{output}'")
await asyncio.sleep(0.5)
if self.check_if_prompt_is_found(output):
break
loop -= 1
except asyncio.TimeoutError:
log.error('send_config_setTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_config_setTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_config_setTelnet: raw output: '{output}'
send_config_setTelnet: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setTelnet: cleaned output: '{output}'
send_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return returned_output
async def get_version(self):
"""
        Async method used to get the version of the software of the device
:return: Version of the software of the device
:rtype: str
"""
log.info('get_version')
version = ''
output = await self.send_command(self.cmd_get_version)
version = output.split('Version ')[1].split(',')[0]
log.info(f'get_version: version: {version}')
return version
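    # Illustrative: an IOS-style line 'Cisco IOS Software, ... Version 15.2(4)E7, RELEASE ...'
    # yields '15.2(4)E7'.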
async def get_hostname(self):
"""
        Async method used to get the name of the device
:return: Name of the device
:rtype: str
"""
log.info('get_hostname')
output = await self.send_command(self.cmd_get_hostname)
log.info(f"get_hostname: output: '{output}'")
output = output.split()[0]
log.info(f"get_hostname: hostname found: '{output}'")
return output
async def get_model(self):
"""
        Async method used to get the model of the device
:return: Model of the device
:rtype: str
"""
log.info('get_model')
output = await self.send_command(self.cmd_get_model)
log.info(f"get_model: output: '{output}'")
output = output.split('"')[3]
log.info(f"get_model: model found: '{output}'")
return output
async def get_serial_number(self):
"""
Get serial number of the switch or the serial number of the first switch of a stack
:return: Serial number of the device
:rtype: str
"""
log.info('get_serial_number')
output = await self.send_command(self.cmd_get_serial_number)
log.info(f"get_serial_number: output: '{output}'")
output = output.splitlines()[0].split()[-1]
log.info(f"get_hostname: hostname found: '{output}'")
return output
async def get_config(self, timeout=None):
"""
        Async method used to get the configuration of the device
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: Configuration of the device
:rtype: str
"""
log.info('get_config')
if timeout is None:
timeout = self.timeout
output = await self.send_command(self.cmd_get_config, timeout=timeout)
return output
async def save_config(self):
"""
        Async method used to save the current configuration on the device
:return: Commands of the configuration saving process
:rtype: str
"""
log.info('save_config')
output = await self.send_command(self.cmd_save_config)
return output
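# Minimal usage sketch (ip/username/password are illustrative placeholders;
# a reachable device is assumed):
#
#   import asyncio
#
#   async def main():
#       async with NetworkDevice(ip='192.0.2.1', username='admin',
#                                password='secret', protocol='ssh') as device:
#           print(await device.get_version())
#
#   asyncio.run(main())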
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NetworkDevice:
<|reserved_special_token_0|>
def __init__(self, **kwargs):
log.info('__init__')
self.ip = ''
self.username = ''
self.password = ''
self.device_type = ''
self.port = 22
self.timeout = 10
self._protocol = 'ssh'
self.enable_mode = False
self.enable_password = ''
self.conn = None
self._writer = None
self._reader = None
self.possible_prompts = []
self._connect_first_ending_prompt = ['#', '>']
self.list_of_possible_ending_prompts = ['(config-line)#',
'(config-if)#', '(config)#', '>', '#']
self._carriage_return_for_send_command = '\n'
self._send_command_error_in_returned_output = []
self._telnet_connect_login = 'Username:'
self._telnet_connect_password = 'Password:'
self._telnet_connect_authentication_fail_prompt = [':', '%']
self.cmd_enable = 'enable'
self.cmd_disable_paging = 'terminal length 0'
self.cmd_enter_config_mode = 'configure terminal'
self.cmd_exit_config_mode = 'exit'
self.cmd_get_version = 'show version'
self.cmd_get_hostname = 'show version | include uptime'
self.cmd_get_model = 'show inventory'
self.cmd_get_serial_number = 'show inventory | i SN'
self.cmd_get_config = 'show running-config'
self.cmd_save_config = 'write memory'
self.cmd_get_interfaces = [
'interface ethernet print terse without-paging',
'foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}'
, 'interface bridge port print terse without-paging']
self.cmd_set_interface = ['interface ethernet enable <INTERFACE>',
'interface ethernet disable <INTERFACE>',
'interface ethernet comment <INTERFACE> "<COMMENT>"',
'interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>',
'interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]'
]
self.cmd_get_mac_address_table = (
'interface bridge host print without-paging')
self.cmd_get_arp = 'ip arp print terse without-paging'
self.cmd_get_lldp_neighbors = 'ip neighbor print terse without-paging'
self.cmd_get_vlans = 'interface bridge vlan print terse without-paging'
self.cmd_add_vlan = (
'interface bridge vlan add vlan-ids=<VLAN> comment="<VLAN_NAME>" bridge=<BRIDGE>'
)
self.cmd_remove_vlan = (
'interface bridge vlan remove [find vlan-ids=<VLAN>]')
self.cmd_add_interface_to_vlan = ['interface bridge vlan print terse',
'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'
,
'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'
,
'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'
]
self.cmd_remove_interface_from_vlan = [
'interface bridge vlan print terse',
'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'
,
'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'
,
'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'
]
self.cmd_get_routing_table = 'ip route print without-paging terse'
self.cmd_get_interfaces_ip = 'ip address print terse without-paging'
self.cmd_add_static_route = (
'ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>'
)
self.cmd_remove_static_route = (
'ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]')
log.debug('__init__: kwargs: ' + str(kwargs))
if 'ip' in kwargs:
self.ip = kwargs['ip']
log.info('__init__: ip found: ' + str(self.ip))
if 'username' in kwargs:
self.username = kwargs['username']
log.info('__init__: username found: ' + str(self.username))
if 'password' in kwargs:
self.password = kwargs['password']
log.debug('__init__: password found: ' + str(self.password))
if 'device_type' in kwargs:
self.device_type = kwargs['device_type']
log.info('__init__: device_type found: ' + str(self.device_type))
if 'timeout' in kwargs:
self.timeout = kwargs['timeout']
log.info('__init__: timeout found: ' + str(self.timeout))
if 'protocol' in kwargs:
self._protocol = kwargs['protocol'].lower()
log.info('__init__: protocol found: ' + str(self._protocol))
if self._protocol.lower() == 'telnet':
self.port = 23
if 'port' in kwargs:
self.port = kwargs['port']
log.info('__init__: port found: ' + str(self.port))
if 'enable_mode' in kwargs:
self.enable_mode = kwargs['enable_mode']
log.info('__init__: enable_mode found: ' + str(self.enable_mode))
if 'enable_password' in kwargs:
self.enable_password = kwargs['enable_password']
log.info('__init__: enable_password found: ' + str(self.
enable_password))
async def __aenter__(self):
"""
Context manager opening connection
"""
try:
await self.connect()
except Exception:
await self.disconnect()
raise
return self
async def __aexit__(self, exc_type, exc_value, traceback):
"""
Context manager closing connection
"""
await self.disconnect()
def find_prompt(self, text):
"""
Method used to find a prompt inside an output string
This method is used during the first communication with the device.
        First it finds the prompt, then calculates the different forms the prompt
        can take. This is useful later on when finding the prompt in other
        output streams (read).
:param text: data with a prompt
:type text: str
:return: the prompt found
:rtype: str
"""
        prompt = text.split('\n')[-1]
        prompt = prompt.split('\r')[-1]
log.info(f"find_prompt: prompt: '{prompt}'")
self.possible_prompts = self.get_possible_prompts(prompt)
return prompt
def get_possible_prompts(self, prompt):
"""
        Method used to check if a prompt has one of the expected endings, then
        create a list with all possible prompts for the device
:param prompt: a prompt with a possible ending prompt (eg. "switch#")
:type prompt: str
:return: the list of prompts
:rtype: list
"""
list_of_prompts = []
list_of_possible_ending_prompts = self.list_of_possible_ending_prompts
my_prompt = prompt
for ending in list_of_possible_ending_prompts:
if my_prompt.endswith(ending):
my_prompt = my_prompt[:-len(ending)]
break
log.info(f"get_possible_prompts: prompt found: '{my_prompt}'")
log.info(f"get_possible_prompts: prompt found size: '{len(my_prompt)}'"
)
for ending in list_of_possible_ending_prompts:
list_of_prompts.append(my_prompt + ending)
log.info(
f'get_possible_prompts: list of possible prompts: {list_of_prompts}'
)
return list_of_prompts
def check_if_prompt_is_found(self, text):
"""
Method used to check if a prompt is detected inside a string
:param text: a string with prompt
:type text: str
        :return: True if a prompt is found, False otherwise
        :rtype: bool
"""
prompt_found = False
for prompt in self.possible_prompts:
log.info(f"check_if_prompt_is_found: prompt: '{prompt}'")
if prompt in text:
prompt_found = True
log.info(f"check_if_prompt_is_found: prompt found: '{prompt}'")
break
return prompt_found
def remove_command_in_output(self, text, cmd):
"""
Method removing the command at the beginning of a string
After sending commands an "echo" of the command sent
        is displayed in the output string. This method removes it.
:param text: the text with the command at the beginning
:type text: str
:param cmd: the command previously sent
:type cmd: str
:return: the output string without the command
:rtype: str
"""
log.info(f"remove_command_in_output: cmd = '{cmd}'")
log.info(
f"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'")
output = text.split(cmd + '\n')[-1]
log.info(f"remove_command_in_output: output = '{output}'")
return output
def remove_starting_carriage_return_in_output(self, text):
"""
Method removing the carriage return at the beginning of a string
:param text: the text with the command at the beginning
:type text: str
:return: the output string without the starting carriage return
:rtype: str
"""
log.info('remove_starting_carriage_return_in_output')
output = text.lstrip('\r\n\r')
log.info(
f"remove_starting_carriage_return_in_output: output = '{output}'")
return output
def remove_ending_prompt_in_output(self, text):
"""
Method removing the prompt at the end of a string
        :param text: the text with a prompt at the end
:type text: str
:return: the output string without the ending prompt
:rtype: str
"""
log.info('remove_ending_prompt_in_output')
for prompt in self.possible_prompts:
log.info(f"remove_ending_prompt_in_output: prompt: '{prompt}'")
if prompt in text:
text = text[:-len(prompt)]
text = text.rstrip('\r\n')
break
log.info(
f"remove_ending_prompt_in_output: text without prompt:\n'{text}'")
return text
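    # Illustrative: with 'switch#' among the possible prompts,
    #   remove_ending_prompt_in_output('up 5 days\r\nswitch#') -> 'up 5 days'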
def check_error_output(self, output):
"""
Check if an error is returned by the device ("% Unrecognized command", "% Ambiguous command", etc.)
If an error is found, then an exception is raised
"""
log.info('check_error_output')
if output:
log.info('check_error_output: output has some data')
for element in self._send_command_error_in_returned_output:
log.info(f'check_error_output: element: {element}')
log.info(f'check_error_output: output[0]: {output[0]}')
if output.startswith(element):
raise Exception(output)
def remove_ansi_escape_sequence(self, text):
"""
        Method removing ANSI escape sequences from a string
        Just CSI sequences are removed
        :param text: the text containing ANSI escape sequences
        :type text: str
        :return: the text without CSI sequences
:rtype: str
"""
output = ''
esc_found = 0
for i in text:
if esc_found == 0:
if i == '\x1b':
log.info('Esc!')
esc_found = 1
else:
output += i
elif esc_found == 1:
if i == '[':
log.info('CSI sequence')
esc_found = 2
else:
output += '\x1b' + i
esc_found = 0
elif i >= 'a' and i <= 'z' or i >= 'A' and i <= 'Z':
log.info('End of escape sequence')
esc_found = 0
return output
async def disable_paging(self):
"""
Async method disabling paging on a device
Use the "cmd_disable_paging" attribute
"""
log.info('disable_paging')
await self.send_command(self.cmd_disable_paging)
async def connect(self):
"""
Async method used for connecting a device
Currently supported: SSH and Telnet
"""
log.info('connect')
try:
if self._protocol == 'ssh':
await self.connectSSH()
elif self._protocol == 'telnet':
await self.connectTelnet()
else:
raise Exception(
f'connect: unsupported protocol: {self._protocol}')
except Exception:
log.info('connect: connection error')
raise
async def connectSSH(self):
"""
Async method used for connecting a device using SSH protocol
"""
log.info('connectSSH')
generator = asyncssh.connect(self.ip, username=self.username,
password=self.password, known_hosts=None, encryption_algs=[algs
.decode('utf-8') for algs in asyncssh.encryption._enc_algs])
try:
self.conn = await asyncio.wait_for(generator, timeout=self.timeout)
except asyncio.exceptions.TimeoutError as error:
log.error(
f"connectSSH: connection failed: {self.ip} timeout: '{error}'")
raise asyncio.exceptions.TimeoutError(
'Connection failed: connection timed out.')
except Exception as error:
log.error(f"connectSSH: connection failed: {self.ip} '{error}'")
raise
log.info('connectSSH: connection success')
self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type
='netscud')
log.info('connectSSH: open_session success')
data = ''
prompt_not_found = True
try:
while prompt_not_found:
log.info('connectSSH: beginning of the loop')
data += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=self.timeout)
log.info(f"connectSSH: data: '{str(data)}'")
log.info(
f"connectSSH: data: hex:'{data.encode('utf-8').hex()}'")
for prompt in self._connect_first_ending_prompt:
if data.endswith(prompt):
log.info(
f"connectSSH: first ending prompt found: '{prompt}'"
)
prompt_not_found = False
break
log.info('connectSSH: end of loop')
except Exception as error:
log.error(
f"connectSSH: timeout while reading the prompt: {self.ip} '{error}'"
)
raise
log.info(f'connectSSH: end of prompt loop')
data = self.remove_ansi_escape_sequence(data)
self.prompt = self.find_prompt(str(data))
log.info(f"connectSSH: prompt found: '{self.prompt}'")
log.info(f"connectSSH: prompt found size: '{len(self.prompt)}'")
if self.cmd_disable_paging:
await self.disable_paging()
async def connectTelnet(self):
"""
Async method used for connecting a device using Telnet protocol
"""
log.info('connectTelnet')
try:
conn = asyncio.open_connection(self.ip, self.port)
except Exception as error:
log.error(
f"connectTelnet: preparation to the connection failed: '{error}'"
)
raise
log.info('connectTelnet: preparation to the connection success')
try:
self._reader, self._writer = await asyncio.wait_for(conn,
timeout=self.timeout)
except asyncio.TimeoutError:
log.error('connectTelnet: connection: timeout')
raise
log.info('connectTelnet: connection success')
prompt = self._telnet_connect_login
prompt_password = self._telnet_connect_password
use_login = True
output = ''
byte_data = b''
while True:
log.info(f'connectTelnet: read data for prompt')
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=self.timeout)
log.info(f'connectTelnet: byte_data: {byte_data}')
output = str(byte_data)
log.info(f'connectTelnet: output: {output}')
if prompt in output:
break
elif prompt_password in output:
use_login = False
break
log.info(f"connectTelnet: login prompt: '{output}'")
if use_login:
log.info('connectTelnet: sending login')
try:
await self.send_command(self.username, prompt_password)
log.info('connectTelnet: login sent')
except Exception:
raise
log.info('connectTelnet: sending password')
try:
output = await self.telnet_send_command_with_unexpected_pattern(
self.password, self._connect_first_ending_prompt, self.
_telnet_connect_authentication_fail_prompt)
except Exception:
raise
log.info('connectTelnet: password sent')
self.prompt = self.find_prompt(str(output))
log.info(f"connectTelnet: prompt found: '{self.prompt}'")
if self.enable_mode:
log.info('connectTelnet: enable mode to be activated')
try:
await self.send_command(self.cmd_enable, prompt_password)
log.info('connectTelnet: enable command sent')
log.info('connectTelnet: sending enable password')
await self.telnet_send_command_with_unexpected_pattern(self
.enable_password, self._connect_first_ending_prompt,
self._telnet_connect_authentication_fail_prompt)
log.info('connectTelnet: enable password sent')
except Exception:
log.info('connectTelnet: enable password failure')
raise
if self.cmd_disable_paging:
await self.disable_paging()
async def disconnect(self):
"""
Async method used to disconnect a device
If this method is not used then exceptions will happen
when the program will end
"""
log.info('disconnect')
if self._protocol == 'ssh':
await self.disconnectSSH()
elif self._protocol == 'telnet':
await self.disconnectTelnet()
else:
raise Exception(f'Unsupported protocol: {self._protocol}')
async def disconnectSSH(self):
"""
Async method used to disconnect a device in SSH
If this method is not used then exceptions will happen
when the program will end
"""
log.info('disconnectSSH')
if self.conn:
self.conn.close()
self.conn = None
async def disconnectTelnet(self):
"""
Async method used to disconnect a device in Telnet
If this method is not used then exceptions will happen
when the program will end
"""
log.info('disconnectTelnet')
if self._writer:
self._writer.close()
self._writer = None
async def send_command(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the output of command
:rtype: str
"""
log.info('send_command')
if timeout is None:
timeout = self.timeout
if self._protocol == 'ssh':
output = await self.send_commandSSH(cmd, pattern=pattern,
timeout=timeout)
elif self._protocol == 'telnet':
output = await self.send_commandTelnet(cmd, pattern=pattern,
timeout=timeout)
else:
raise Exception(
f'send_command: unsupported protocol: {self._protocol}')
return output
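    # Usage sketch for send_command (hypothetical values; assumes an already
    # connected NetworkDevice instance named "device"):
    #
    #     output = await device.send_command('show clock')
    #     # Wait for a confirmation prompt instead of the device prompt:
    #     output = await device.send_command('reload', pattern='[confirm]')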
async def send_commandSSH(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
:return: the output of command
:rtype: str
"""
log.info('send_commandSSH')
if timeout is None:
timeout = self.timeout
log.info(f"send_commandSSH: cmd = '{cmd}'")
self.stdinx.write(cmd + self._carriage_return_for_send_command)
log.info('send_commandSSH: command sent')
output = ''
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
output = self.remove_ansi_escape_sequence(output)
output = output.replace('\r', '')
log.info(f"send_commandSSH: output: '{output}'")
if pattern:
if pattern in output:
break
elif self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_commandSSH: raw output: '{output}'
send_commandSSH: raw output (hex): '{output.encode().hex()}'"""
)
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_commandSSH: cleaned output: '{output}'
send_commandSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return output
async def send_commandTelnet(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
:return: the output of command
:rtype: str
"""
log.info('send_commandTelnet')
if timeout is None:
timeout = self.timeout
cmd = cmd + '\n'
self._writer.write(cmd.encode())
output = ''
byte_data = b''
try:
while True:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_commandTelnet: byte_data: '{byte_data}'")
output = str(byte_data)
log.info(f"send_commandTelnet: output: '{output}'")
if pattern:
if pattern in output:
break
elif self.check_if_prompt_is_found(output):
break
except asyncio.TimeoutError:
log.error('send_commandTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_commandTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_commandTelnet: raw output: '{output}'
send_commandTelnet: raw output (hex): '{output.encode().hex()}'"""
)
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_commandTelnet: cleaned output: '{output}'
send_commandTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return output
async def telnet_send_command_with_unexpected_pattern(self, cmd,
pattern, error_pattern=None, timeout=None):
"""
Async method used to send command for Telnet connection to a device with possible unexpected patterns
        send_command can wait until timeout if the login and password are wrong. This method
        speeds up the returned error message when an authentication failure is identified.
        This method is limited to authentication when a password is required
:param cmd: command to send
:type cmd: str
        :param pattern: optional, a list of patterns located at the very end of a returned string. Can be used
            to define a custom or unexpected prompt at the end of a string
        :type pattern: list
        :param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
        :param error_pattern: optional, a list of failed prompts found when the login and password are not correct
        :type error_pattern: list
:return: the output of command
:rtype: str
"""
log.info('telnet_send_command_with_unexpected_pattern')
if timeout is None:
timeout = self.timeout
cmd = cmd + self._carriage_return_for_send_command
self._writer.write(cmd.encode())
output = ''
byte_data = b''
pattern_not_found = True
try:
while pattern_not_found:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(
f"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'"
)
log.debug(
f"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'"
)
output = str(byte_data)
log.info(
f"telnet_send_command_with_unexpected_pattern: output: '{output}'"
)
if pattern:
for prompt in pattern:
log.info(
f"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'"
)
if prompt in output:
pattern_not_found = False
log.info(
f"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'"
)
break
if error_pattern and pattern_not_found:
for bad_prompt in error_pattern:
log.info(
f"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'"
)
if bad_prompt in output:
log.error(
'telnet_send_command_with_unexpected_pattern: authentication failed'
)
raise Exception(
'telnet_send_command_with_unexpected_pattern: authentication failed'
)
except asyncio.TimeoutError:
await self.disconnect()
log.error(
'telnet_send_command_with_unexpected_pattern: reading prompt: timeout'
)
raise
except Exception as error:
await self.disconnect()
log.error(
f'telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}'
)
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""telnet_send_command_with_unexpected_pattern: raw output: '{output}'
telnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'"""
)
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'
telnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'"""
)
return output
async def send_config_set(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
        The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
:return: the results of the commands sent
        :rtype: str
"""
log.info('send_config_set')
if timeout is None:
timeout = self.timeout
if self._protocol == 'ssh':
output = await self.send_config_setSSH(cmds, timeout)
elif self._protocol == 'telnet':
output = await self.send_config_setTelnet(cmds, timeout)
else:
raise Exception(
f'send_config_set: unsupported protocol: {self._protocol}')
return output
async def send_config_setSSH(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
        The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
:return: the results of the commands sent
        :rtype: str
"""
log.info('send_config_setSSH')
if timeout is None:
timeout = self.timeout
returned_output = ''
if isinstance(cmds, str):
cmds = [cmds]
elif not isinstance(cmds, list):
log.error(
'send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list'
)
return returned_output
        log.info('send_config_setSSH: entering configuration mode')
output = ''
cmd = self.cmd_enter_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setSSH: cmd = '{cmd}'")
self.stdinx.write(cmd)
log.info('send_config_setSSH: configuration mode entered')
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_config_setSSH: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_config_setSSH: raw output: '{output}'
send_config_setSSH: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setSSH: cleaned output: '{output}'
send_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setSSH: sending commands')
output = ''
for cmd in cmds:
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setSSH: cmd = '{cmd}'")
self.stdinx.write(cmd)
log.info('send_config_setSSH: command sent')
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_config_setSSH: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_config_setSSH: raw output: '{output}'
send_config_setSSH: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setSSH: cleaned output: '{output}'
send_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setSSH: leaving configuration mode')
output = ''
cmd = self.cmd_exit_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setSSH: cmd = '{cmd}'")
self.stdinx.write(cmd)
log.info('send_config_setSSH: command to leave configuration mode sent'
)
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_config_setSSH: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_config_setSSH: raw output: '{output}'
send_config_setSSH: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setSSH: cleaned output: '{output}'
send_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return returned_output
async def send_config_setTelnet(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
        The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
:return: the results of the commands sent
        :rtype: str
"""
log.info('send_config_setTelnet')
if timeout is None:
timeout = self.timeout
returned_output = ''
if isinstance(cmds, str):
cmds = [cmds]
elif not isinstance(cmds, list):
log.error(
                'send_config_setTelnet: parameter cmds used in send_config_set is neither a string nor a list'
)
return returned_output
log.info('send_config_setTelnet: entering configuration mode')
output = ''
cmd = self.cmd_enter_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
self._writer.write(cmd.encode())
log.info('send_config_setTelnet: configuration mode entered')
output = ''
byte_data = b''
try:
while True:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
output = str(byte_data)
log.info(f"send_config_setTelnet: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
except asyncio.TimeoutError:
log.error('send_config_setTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_config_setTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_config_setTelnet: raw output: '{output}'
send_config_setTelnet: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setTelnet: cleaned output: '{output}'
send_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setTelnet: sending commands')
output = ''
for cmd in cmds:
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
self._writer.write(cmd.encode())
log.info('send_config_setTelnet: command sent')
output = ''
byte_data = b''
try:
while True:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
output = str(byte_data)
log.info(f"send_config_setTelnet: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
except asyncio.TimeoutError:
log.error('send_config_setTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_config_setTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_config_setTelnet: raw output: '{output}'
send_config_setTelnet: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setTelnet: cleaned output: '{output}'
send_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setTelnet: leaving configuration mode')
output = ''
cmd = self.cmd_exit_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
self._writer.write(cmd.encode())
log.info(
'send_config_setTelnet: command to leave configuration mode sent')
output = ''
byte_data = b''
loop = 3
try:
while loop:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
output = str(byte_data)
log.info(f"send_config_setTelnet: output: '{output}'")
await asyncio.sleep(0.5)
if self.check_if_prompt_is_found(output):
break
loop -= 1
except asyncio.TimeoutError:
log.error('send_config_setTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_config_setTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_config_setTelnet: raw output: '{output}'
send_config_setTelnet: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setTelnet: cleaned output: '{output}'
send_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return returned_output
async def get_version(self):
"""
        Async method used to get the version of the software of the device
:return: Version of the software of the device
:rtype: str
"""
log.info('get_version')
version = ''
output = await self.send_command(self.cmd_get_version)
version = output.split('Version ')[1].split(',')[0]
log.info(f'get_version: version: {version}')
return version
async def get_hostname(self):
"""
        Async method used to get the name of the device
:return: Name of the device
:rtype: str
"""
log.info('get_hostname')
output = await self.send_command(self.cmd_get_hostname)
log.info(f"get_hostname: output: '{output}'")
output = output.split()[0]
log.info(f"get_hostname: hostname found: '{output}'")
return output
async def get_model(self):
"""
        Async method used to get the model of the device
:return: Model of the device
:rtype: str
"""
log.info('get_model')
output = await self.send_command(self.cmd_get_model)
log.info(f"get_model: output: '{output}'")
output = output.split('"')[3]
log.info(f"get_model: model found: '{output}'")
return output
async def get_serial_number(self):
"""
Get serial number of the switch or the serial number of the first switch of a stack
:return: Serial number of the device
:rtype: str
"""
log.info('get_serial_number')
output = await self.send_command(self.cmd_get_serial_number)
log.info(f"get_serial_number: output: '{output}'")
output = output.splitlines()[0].split()[-1]
log.info(f"get_hostname: hostname found: '{output}'")
return output
async def get_config(self, timeout=None):
"""
        Async method used to get the configuration of the device
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: Configuration of the device
:rtype: str
"""
log.info('get_config')
if timeout is None:
timeout = self.timeout
output = await self.send_command(self.cmd_get_config, timeout=timeout)
return output
async def save_config(self):
"""
        Async method used to save the current configuration on the device
:return: Commands of the configuration saving process
:rtype: str
"""
log.info('save_config')
output = await self.send_command(self.cmd_save_config)
return output
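
# Minimal end-to-end sketch (assumptions: the IP address, credentials and
# commands below are placeholders for illustration, not values from this
# library):
#
#     async def main():
#         device = NetworkDevice(ip='192.0.2.1', username='admin',
#                                password='secret', device_type='cisco_ios')
#         async with device:
#             print(await device.get_version())
#             print(await device.send_command('show ip interface brief'))
#
#     asyncio.run(main())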
import asyncio, asyncssh, logging
log = logging.getLogger(__package__)
logging.basicConfig(level=logging.DEBUG)
asyncssh.set_debug_level(2)
MAX_BUFFER_DATA = 65535
class NetworkDevice:
"""
Base class for network object
:param ip: IP address of a device
:type ip: str
:param username: Username used to connect to a device
:type username: str
:param password: Password used to connect to a device
:type password: str
:param device_type: Type of device used
:type device_type: str
:param port: TCP port used to connect a device. Default value is "22" for SSH
:type port: int, optional
    :param timeout: Timeout (in seconds) used when connecting and sending commands to a device. Default value is 10 seconds
:type timeout: int, optional
:param _protocol: Protocol used to connect a device. "ssh" or "telnet" are possible options. Default value is "ssh"
:type _protocol: str, optional
:param enable_mode: Enable mode for devices requiring it. Default value is "False"
:type enable_mode: bool, optional
:param enable_password: Enable password used for enable mode.
:type enable_password: str, optional
:param conn: Variable used for the management of the SSH connection
:type conn: SSHClientConnection object
:param _writer: Variable used for the management of the Telnet connection and writing channel
:type _writer: StreamWriter object
:param _reader: Variable used for the management of the Telnet reading channel
:type _reader: StreamReader object
:param possible_prompts: Used by the connect method to list all possible prompts of the device
:type possible_prompts: list
    :param _connect_first_ending_prompt: Default possible ending prompts. Used only once, right after login and password, to discover the prompt
:type _connect_first_ending_prompt: list
    :param list_of_possible_ending_prompts: Different strings a device prompt can end with. Used for detecting the prompt returned by sent commands
:type list_of_possible_ending_prompts: list
    :param _telnet_connect_login: Login prompt for Telnet. Used to detect when a login is expected or when login and password access has failed
    :type _telnet_connect_login: str
    :param _telnet_connect_password: Password prompt for Telnet. Used to detect when a password is expected or when login and password access has failed
    :type _telnet_connect_password: str
:param _telnet_connect_authentication_fail_prompt: Known failing messages or prompts when an authentication has failed. Used to get an answer faster than timeout events
:type _telnet_connect_authentication_fail_prompt: list
:param cmd_enable: Enable command for entering into enable mode
:type cmd_enable: str
:param cmd_disable_paging: Command used to disable paging on a device. That command is run at connection time
:type cmd_disable_paging: str
    :param cmd_enter_config_mode: Command used to enter into a configuration mode on a device when this device supports that feature.
:type cmd_enter_config_mode: str
    :param cmd_exit_config_mode: Command used to leave a configuration mode on a device when this device supports that feature.
:type cmd_exit_config_mode: str
:param cmd_get_version: API command used to get the software version of a device
:type cmd_get_version: str
:param cmd_get_hostname: API command used to get the hostname of a device
:type cmd_get_hostname: str
:param cmd_get_model: API command used to get the model of a device
:type cmd_get_model: str
:param cmd_get_serial_number: API command used to get the serial number of a device
:type cmd_get_serial_number: str
:param cmd_get_config: API command used to get the running configuration of a device
:type cmd_get_config: str
:param cmd_save_config: API command used to save the running configuration on the device
:type cmd_save_config: str
"""
def __init__(self, **kwargs):
log.info('__init__')
self.ip = ''
self.username = ''
self.password = ''
self.device_type = ''
self.port = 22
self.timeout = 10
self._protocol = 'ssh'
self.enable_mode = False
self.enable_password = ''
self.conn = None
self._writer = None
self._reader = None
self.possible_prompts = []
self._connect_first_ending_prompt = ['#', '>']
self.list_of_possible_ending_prompts = ['(config-line)#',
'(config-if)#', '(config)#', '>', '#']
self._carriage_return_for_send_command = '\n'
self._send_command_error_in_returned_output = []
self._telnet_connect_login = 'Username:'
self._telnet_connect_password = 'Password:'
self._telnet_connect_authentication_fail_prompt = [':', '%']
self.cmd_enable = 'enable'
self.cmd_disable_paging = 'terminal length 0'
self.cmd_enter_config_mode = 'configure terminal'
self.cmd_exit_config_mode = 'exit'
self.cmd_get_version = 'show version'
self.cmd_get_hostname = 'show version | include uptime'
self.cmd_get_model = 'show inventory'
self.cmd_get_serial_number = 'show inventory | i SN'
self.cmd_get_config = 'show running-config'
self.cmd_save_config = 'write memory'
self.cmd_get_interfaces = [
'interface ethernet print terse without-paging',
'foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}'
, 'interface bridge port print terse without-paging']
self.cmd_set_interface = ['interface ethernet enable <INTERFACE>',
'interface ethernet disable <INTERFACE>',
'interface ethernet comment <INTERFACE> "<COMMENT>"',
'interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>',
'interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]'
]
self.cmd_get_mac_address_table = (
'interface bridge host print without-paging')
self.cmd_get_arp = 'ip arp print terse without-paging'
self.cmd_get_lldp_neighbors = 'ip neighbor print terse without-paging'
self.cmd_get_vlans = 'interface bridge vlan print terse without-paging'
self.cmd_add_vlan = (
'interface bridge vlan add vlan-ids=<VLAN> comment="<VLAN_NAME>" bridge=<BRIDGE>'
)
self.cmd_remove_vlan = (
'interface bridge vlan remove [find vlan-ids=<VLAN>]')
self.cmd_add_interface_to_vlan = ['interface bridge vlan print terse',
'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'
,
'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'
,
'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'
]
self.cmd_remove_interface_from_vlan = [
'interface bridge vlan print terse',
'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'
,
'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'
,
'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'
]
self.cmd_get_routing_table = 'ip route print without-paging terse'
self.cmd_get_interfaces_ip = 'ip address print terse without-paging'
self.cmd_add_static_route = (
'ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>'
)
self.cmd_remove_static_route = (
'ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]')
log.debug('__init__: kwargs: ' + str(kwargs))
if 'ip' in kwargs:
self.ip = kwargs['ip']
log.info('__init__: ip found: ' + str(self.ip))
if 'username' in kwargs:
self.username = kwargs['username']
log.info('__init__: username found: ' + str(self.username))
if 'password' in kwargs:
self.password = kwargs['password']
log.debug('__init__: password found: ' + str(self.password))
if 'device_type' in kwargs:
self.device_type = kwargs['device_type']
log.info('__init__: device_type found: ' + str(self.device_type))
if 'timeout' in kwargs:
self.timeout = kwargs['timeout']
log.info('__init__: timeout found: ' + str(self.timeout))
if 'protocol' in kwargs:
self._protocol = kwargs['protocol'].lower()
log.info('__init__: protocol found: ' + str(self._protocol))
if self._protocol.lower() == 'telnet':
self.port = 23
if 'port' in kwargs:
self.port = kwargs['port']
log.info('__init__: port found: ' + str(self.port))
if 'enable_mode' in kwargs:
self.enable_mode = kwargs['enable_mode']
log.info('__init__: enable_mode found: ' + str(self.enable_mode))
if 'enable_password' in kwargs:
self.enable_password = kwargs['enable_password']
log.info('__init__: enable_password found: ' + str(self.
enable_password))
async def __aenter__(self):
"""
Context manager opening connection
"""
try:
await self.connect()
except Exception:
await self.disconnect()
raise
return self
async def __aexit__(self, exc_type, exc_value, traceback):
"""
Context manager closing connection
"""
await self.disconnect()
def find_prompt(self, text):
"""
Method used to find a prompt inside an output string
This method is used during the first communication with the device.
        First it finds the prompt, then calculates the different forms the prompt
        can take. This will be useful later on when finding the prompt in other
        output streams (reads).
:param text: data with a prompt
:type text: str
:return: the prompt found
:rtype: str
"""
        prompt = text.split('\n')[-1]
        prompt = prompt.split('\r')[-1]
log.info(f"find_prompt: prompt: '{prompt}'")
self.possible_prompts = self.get_possible_prompts(prompt)
return prompt
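    # Example (hypothetical output): if the last line of "text" is "switch#",
    # find_prompt returns "switch#" and get_possible_prompts fills
    # self.possible_prompts with ["switch(config-line)#", "switch(config-if)#",
    # "switch(config)#", "switch>", "switch#"].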
def get_possible_prompts(self, prompt):
"""
Method used to check if a prompt has one of the expected endings then
create a list with all possible prompts for the device
        :param prompt: a prompt with a possible ending prompt (e.g. "switch#")
:type prompt: str
:return: the list of prompts
:rtype: list
"""
list_of_prompts = []
list_of_possible_ending_prompts = self.list_of_possible_ending_prompts
my_prompt = prompt
for ending in list_of_possible_ending_prompts:
if my_prompt.endswith(ending):
my_prompt = my_prompt[:-len(ending)]
break
log.info(f"get_possible_prompts: prompt found: '{my_prompt}'")
log.info(f"get_possible_prompts: prompt found size: '{len(my_prompt)}'"
)
for ending in list_of_possible_ending_prompts:
list_of_prompts.append(my_prompt + ending)
log.info(
f'get_possible_prompts: list of possible prompts: {list_of_prompts}'
)
return list_of_prompts
def check_if_prompt_is_found(self, text):
"""
Method used to check if a prompt is detected inside a string
        :param text: a string that may contain a prompt
        :type text: str
        :return: True if one of the possible prompts is found in the text
        :rtype: bool
"""
prompt_found = False
for prompt in self.possible_prompts:
log.info(f"check_if_prompt_is_found: prompt: '{prompt}'")
if prompt in text:
prompt_found = True
log.info(f"check_if_prompt_is_found: prompt found: '{prompt}'")
break
return prompt_found
def remove_command_in_output(self, text, cmd):
"""
Method removing the command at the beginning of a string
After sending commands an "echo" of the command sent
        is displayed in the output string. This method removes it.
:param text: the text with the command at the beginning
:type text: str
:param cmd: the command previously sent
:type cmd: str
:return: the output string without the command
:rtype: str
"""
log.info(f"remove_command_in_output: cmd = '{cmd}'")
log.info(
f"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'")
output = text.split(cmd + '\n')[-1]
log.info(f"remove_command_in_output: output = '{output}'")
return output
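    # Example (hypothetical): with text "show clock\n10:20:30 UTC\nswitch#"
    # and cmd "show clock", the method returns "10:20:30 UTC\nswitch#"; the
    # remaining prompt is stripped later by remove_ending_prompt_in_output.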
def remove_starting_carriage_return_in_output(self, text):
"""
Method removing the carriage return at the beginning of a string
        :param text: the text with a carriage return at the beginning
:type text: str
:return: the output string without the starting carriage return
:rtype: str
"""
log.info('remove_starting_carriage_return_in_output')
output = text.lstrip('\r\n\r')
log.info(
f"remove_starting_carriage_return_in_output: output = '{output}'")
return output
def remove_ending_prompt_in_output(self, text):
"""
Method removing the prompt at the end of a string
        :param text: the text with a prompt at the end
:type text: str
:return: the output string without the ending prompt
:rtype: str
"""
log.info('remove_ending_prompt_in_output')
for prompt in self.possible_prompts:
log.info(f"remove_ending_prompt_in_output: prompt: '{prompt}'")
if prompt in text:
text = text[:-len(prompt)]
text = text.rstrip('\r\n')
break
log.info(
f"remove_ending_prompt_in_output: text without prompt:\n'{text}'")
return text
def check_error_output(self, output):
"""
Check if an error is returned by the device ("% Unrecognized command", "% Ambiguous command", etc.)
If an error is found, then an exception is raised
"""
log.info('check_error_output')
if output:
log.info('check_error_output: output has some data')
for element in self._send_command_error_in_returned_output:
log.info(f'check_error_output: element: {element}')
log.info(f'check_error_output: output[0]: {output[0]}')
if output.startswith(element):
raise Exception(output)
def remove_ansi_escape_sequence(self, text):
"""
Method removing ANSI escape sequence from a string
Just CSI sequences are removed
        :param text: the text that may contain ANSI escape sequences
        :type text: str
        :return: the output string without ANSI CSI escape sequences
:rtype: str
"""
output = ''
esc_found = 0
for i in text:
if esc_found == 0:
if i == '\x1b':
log.info('Esc!')
esc_found = 1
else:
output += i
elif esc_found == 1:
if i == '[':
log.info('CSI sequence')
esc_found = 2
else:
output += '\x1b' + i
esc_found = 0
elif i >= 'a' and i <= 'z' or i >= 'A' and i <= 'Z':
log.info('End of escape sequence')
esc_found = 0
return output
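    # Example (hypothetical): remove_ansi_escape_sequence('\x1b[2Kswitch#')
    # drops the CSI "erase line" sequence and returns 'switch#'.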
async def disable_paging(self):
"""
Async method disabling paging on a device
Use the "cmd_disable_paging" attribute
"""
log.info('disable_paging')
await self.send_command(self.cmd_disable_paging)
async def connect(self):
"""
Async method used for connecting a device
Currently supported: SSH and Telnet
"""
log.info('connect')
try:
if self._protocol == 'ssh':
await self.connectSSH()
elif self._protocol == 'telnet':
await self.connectTelnet()
else:
raise Exception(
f'connect: unsupported protocol: {self._protocol}')
except Exception:
log.info('connect: connection error')
raise
async def connectSSH(self):
"""
Async method used for connecting a device using SSH protocol
"""
log.info('connectSSH')
        generator = asyncssh.connect(self.ip, username=self.username,
            password=self.password, known_hosts=None,
            encryption_algs=[algs.decode('utf-8') for algs in
                asyncssh.encryption._enc_algs])
try:
self.conn = await asyncio.wait_for(generator, timeout=self.timeout)
except asyncio.exceptions.TimeoutError as error:
log.error(
f"connectSSH: connection failed: {self.ip} timeout: '{error}'")
raise asyncio.exceptions.TimeoutError(
'Connection failed: connection timed out.')
except Exception as error:
log.error(f"connectSSH: connection failed: {self.ip} '{error}'")
raise
log.info('connectSSH: connection success')
self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type
='netscud')
log.info('connectSSH: open_session success')
data = ''
prompt_not_found = True
try:
while prompt_not_found:
log.info('connectSSH: beginning of the loop')
data += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=self.timeout)
log.info(f"connectSSH: data: '{str(data)}'")
log.info(
f"connectSSH: data: hex:'{data.encode('utf-8').hex()}'")
for prompt in self._connect_first_ending_prompt:
if data.endswith(prompt):
log.info(
f"connectSSH: first ending prompt found: '{prompt}'"
)
prompt_not_found = False
break
log.info('connectSSH: end of loop')
except Exception as error:
log.error(
f"connectSSH: timeout while reading the prompt: {self.ip} '{error}'"
)
raise
log.info(f'connectSSH: end of prompt loop')
data = self.remove_ansi_escape_sequence(data)
self.prompt = self.find_prompt(str(data))
log.info(f"connectSSH: prompt found: '{self.prompt}'")
log.info(f"connectSSH: prompt found size: '{len(self.prompt)}'")
if self.cmd_disable_paging:
await self.disable_paging()
async def connectTelnet(self):
"""
Async method used for connecting a device using Telnet protocol
"""
log.info('connectTelnet')
try:
conn = asyncio.open_connection(self.ip, self.port)
except Exception as error:
log.error(
f"connectTelnet: preparation to the connection failed: '{error}'"
)
raise
log.info('connectTelnet: preparation to the connection success')
try:
self._reader, self._writer = await asyncio.wait_for(conn,
timeout=self.timeout)
except asyncio.TimeoutError:
log.error('connectTelnet: connection: timeout')
raise
log.info('connectTelnet: connection success')
prompt = self._telnet_connect_login
prompt_password = self._telnet_connect_password
use_login = True
output = ''
byte_data = b''
while True:
log.info(f'connectTelnet: read data for prompt')
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=self.timeout)
log.info(f'connectTelnet: byte_data: {byte_data}')
output = str(byte_data)
log.info(f'connectTelnet: output: {output}')
if prompt in output:
break
elif prompt_password in output:
use_login = False
break
log.info(f"connectTelnet: login prompt: '{output}'")
if use_login:
log.info('connectTelnet: sending login')
try:
await self.send_command(self.username, prompt_password)
log.info('connectTelnet: login sent')
except Exception:
raise
log.info('connectTelnet: sending password')
try:
output = await self.telnet_send_command_with_unexpected_pattern(
self.password, self._connect_first_ending_prompt, self.
_telnet_connect_authentication_fail_prompt)
except Exception:
raise
log.info('connectTelnet: password sent')
self.prompt = self.find_prompt(str(output))
log.info(f"connectTelnet: prompt found: '{self.prompt}'")
if self.enable_mode:
log.info('connectTelnet: enable mode to be activated')
try:
await self.send_command(self.cmd_enable, prompt_password)
log.info('connectTelnet: enable command sent')
log.info('connectTelnet: sending enable password')
await self.telnet_send_command_with_unexpected_pattern(self
.enable_password, self._connect_first_ending_prompt,
self._telnet_connect_authentication_fail_prompt)
log.info('connectTelnet: enable password sent')
except Exception:
log.info('connectTelnet: enable password failure')
raise
if self.cmd_disable_paging:
await self.disable_paging()
async def disconnect(self):
"""
Async method used to disconnect a device
If this method is not used then exceptions will happen
        when the program ends
"""
log.info('disconnect')
if self._protocol == 'ssh':
await self.disconnectSSH()
elif self._protocol == 'telnet':
await self.disconnectTelnet()
else:
raise Exception(f'Unsupported protocol: {self._protocol}')
async def disconnectSSH(self):
"""
Async method used to disconnect a device in SSH
If this method is not used then exceptions will happen
        when the program ends
"""
log.info('disconnectSSH')
if self.conn:
self.conn.close()
self.conn = None
async def disconnectTelnet(self):
"""
Async method used to disconnect a device in Telnet
If this method is not used then exceptions will happen
        when the program ends
"""
log.info('disconnectTelnet')
if self._writer:
self._writer.close()
self._writer = None
async def send_command(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
:return: the output of command
:rtype: str
"""
log.info('send_command')
if timeout is None:
timeout = self.timeout
if self._protocol == 'ssh':
output = await self.send_commandSSH(cmd, pattern=pattern,
timeout=timeout)
elif self._protocol == 'telnet':
output = await self.send_commandTelnet(cmd, pattern=pattern,
timeout=timeout)
else:
raise Exception(
f'send_command: unsupported protocol: {self._protocol}')
return output
async def send_commandSSH(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
:return: the output of command
:rtype: str
"""
log.info('send_commandSSH')
if timeout is None:
timeout = self.timeout
log.info(f"send_commandSSH: cmd = '{cmd}'")
self.stdinx.write(cmd + self._carriage_return_for_send_command)
log.info('send_commandSSH: command sent')
output = ''
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
output = self.remove_ansi_escape_sequence(output)
output = output.replace('\r', '')
log.info(f"send_commandSSH: output: '{output}'")
if pattern:
if pattern in output:
break
elif self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_commandSSH: raw output: '{output}'
send_commandSSH: raw output (hex): '{output.encode().hex()}'"""
)
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_commandSSH: cleaned output: '{output}'
send_commandSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return output
async def send_commandTelnet(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
:return: the output of command
:rtype: str
"""
log.info('send_commandTelnet')
if timeout is None:
timeout = self.timeout
cmd = cmd + '\n'
self._writer.write(cmd.encode())
output = ''
byte_data = b''
try:
while True:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_commandTelnet: byte_data: '{byte_data}'")
output = str(byte_data)
log.info(f"send_commandTelnet: output: '{output}'")
if pattern:
if pattern in output:
break
elif self.check_if_prompt_is_found(output):
break
except asyncio.TimeoutError:
log.error('send_commandTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_commandTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_commandTelnet: raw output: '{output}'
send_commandTelnet: raw output (hex): '{output.encode().hex()}'"""
)
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_commandTelnet: cleaned output: '{output}'
send_commandTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return output
async def telnet_send_command_with_unexpected_pattern(self, cmd,
pattern, error_pattern=None, timeout=None):
"""
Async method used to send command for Telnet connection to a device with possible unexpected patterns
        send_command can wait until timeout if the login and password are wrong. This method
        speeds up the returned error message when an authentication failure is identified.
        This method is limited to authentication when a password is required
:param cmd: command to send
:type cmd: str
        :param pattern: optional, a list of patterns located at the very end of a returned string. Can be used
            to define a custom or unexpected prompt at the end of a string
        :type pattern: list
        :param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
        :param error_pattern: optional, a list of failed prompts found when the login and password are not correct
        :type error_pattern: list
:return: the output of command
:rtype: str
"""
log.info('telnet_send_command_with_unexpected_pattern')
if timeout is None:
timeout = self.timeout
cmd = cmd + self._carriage_return_for_send_command
self._writer.write(cmd.encode())
output = ''
byte_data = b''
pattern_not_found = True
try:
while pattern_not_found:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(
f"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'"
)
log.debug(
f"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'"
)
output = str(byte_data)
log.info(
f"telnet_send_command_with_unexpected_pattern: output: '{output}'"
)
if pattern:
for prompt in pattern:
log.info(
f"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'"
)
if prompt in output:
pattern_not_found = False
log.info(
f"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'"
)
break
if error_pattern and pattern_not_found:
for bad_prompt in error_pattern:
log.info(
f"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'"
)
if bad_prompt in output:
log.error(
'telnet_send_command_with_unexpected_pattern: authentication failed'
)
raise Exception(
'telnet_send_command_with_unexpected_pattern: authentication failed'
)
except asyncio.TimeoutError:
await self.disconnect()
log.error(
'telnet_send_command_with_unexpected_pattern: reading prompt: timeout'
)
raise
except Exception as error:
await self.disconnect()
log.error(
f'telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}'
)
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""telnet_send_command_with_unexpected_pattern: raw output: '{output}'
telnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'"""
)
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'
telnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'"""
)
return output
async def send_config_set(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
        The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
:return: the results of the commands sent
        :rtype: str
"""
log.info('send_config_set')
if timeout is None:
timeout = self.timeout
if self._protocol == 'ssh':
output = await self.send_config_setSSH(cmds, timeout)
elif self._protocol == 'telnet':
output = await self.send_config_setTelnet(cmds, timeout)
else:
raise Exception(
f'send_config_set: unsupported protocol: {self._protocol}')
return output
async def send_config_setSSH(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
        The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
:return: the results of the commands sent
        :rtype: str
"""
log.info('send_config_setSSH')
if timeout is None:
timeout = self.timeout
returned_output = ''
if isinstance(cmds, str):
cmds = [cmds]
elif not isinstance(cmds, list):
log.error(
'send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list'
)
return returned_output
        log.info('send_config_setSSH: entering configuration mode')
output = ''
cmd = self.cmd_enter_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setSSH: cmd = '{cmd}'")
self.stdinx.write(cmd)
log.info('send_config_setSSH: configuration mode entered')
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_config_setSSH: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_config_setSSH: raw output: '{output}'
send_config_setSSH: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setSSH: cleaned output: '{output}'
send_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setSSH: sending commands')
output = ''
for cmd in cmds:
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setSSH: cmd = '{cmd}'")
self.stdinx.write(cmd)
log.info('send_config_setSSH: command sent')
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_config_setSSH: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_config_setSSH: raw output: '{output}'
send_config_setSSH: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setSSH: cleaned output: '{output}'
send_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setSSH: leaving configuration mode')
output = ''
cmd = self.cmd_exit_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setSSH: cmd = '{cmd}'")
self.stdinx.write(cmd)
log.info('send_config_setSSH: command to leave configuration mode sent'
)
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_config_setSSH: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_config_setSSH: raw output: '{output}'
send_config_setSSH: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setSSH: cleaned output: '{output}'
send_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return returned_output
async def send_config_setTelnet(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
        The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
:return: the results of the commands sent
        :rtype: str
"""
log.info('send_config_setTelnet')
if timeout is None:
timeout = self.timeout
returned_output = ''
if isinstance(cmds, str):
cmds = [cmds]
elif not isinstance(cmds, list):
log.error(
                'send_config_setTelnet: parameter cmds used in send_config_set is neither a string nor a list'
)
return returned_output
log.info('send_config_setTelnet: entering configuration mode')
output = ''
cmd = self.cmd_enter_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
self._writer.write(cmd.encode())
log.info('send_config_setTelnet: configuration mode entered')
output = ''
byte_data = b''
try:
while True:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
output = str(byte_data)
log.info(f"send_config_setTelnet: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
except asyncio.TimeoutError:
log.error('send_config_setTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_config_setTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_config_setTelnet: raw output: '{output}'
send_config_setTelnet: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setTelnet: cleaned output: '{output}'
send_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setTelnet: sending commands')
output = ''
for cmd in cmds:
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
self._writer.write(cmd.encode())
log.info('send_config_setTelnet: command sent')
output = ''
byte_data = b''
try:
while True:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
output = str(byte_data)
log.info(f"send_config_setTelnet: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
except asyncio.TimeoutError:
log.error('send_config_setTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_config_setTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_config_setTelnet: raw output: '{output}'
send_config_setTelnet: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setTelnet: cleaned output: '{output}'
send_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setTelnet: leaving configuration mode')
output = ''
cmd = self.cmd_exit_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
self._writer.write(cmd.encode())
log.info(
'send_config_setTelnet: command to leave configuration mode sent')
output = ''
byte_data = b''
loop = 3
try:
while loop:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
output = str(byte_data)
log.info(f"send_config_setTelnet: output: '{output}'")
await asyncio.sleep(0.5)
if self.check_if_prompt_is_found(output):
break
loop -= 1
except asyncio.TimeoutError:
log.error('send_config_setTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_config_setTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_config_setTelnet: raw output: '{output}'
send_config_setTelnet: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setTelnet: cleaned output: '{output}'
send_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return returned_output
async def get_version(self):
"""
        Async method used to get the version of the software of the device
:return: Version of the software of the device
:rtype: str
"""
log.info('get_version')
version = ''
output = await self.send_command(self.cmd_get_version)
version = output.split('Version ')[1].split(',')[0]
log.info(f'get_version: version: {version}')
return version
async def get_hostname(self):
"""
        Async method used to get the name of the device
:return: Name of the device
:rtype: str
"""
log.info('get_hostname')
output = await self.send_command(self.cmd_get_hostname)
log.info(f"get_hostname: output: '{output}'")
output = output.split()[0]
log.info(f"get_hostname: hostname found: '{output}'")
return output
async def get_model(self):
"""
        Async method used to get the model of the device
:return: Model of the device
:rtype: str
"""
log.info('get_model')
output = await self.send_command(self.cmd_get_model)
log.info(f"get_model: output: '{output}'")
output = output.split('"')[3]
log.info(f"get_model: model found: '{output}'")
return output
async def get_serial_number(self):
"""
Get serial number of the switch or the serial number of the first switch of a stack
:return: Serial number of the device
:rtype: str
"""
log.info('get_serial_number')
output = await self.send_command(self.cmd_get_serial_number)
log.info(f"get_serial_number: output: '{output}'")
output = output.splitlines()[0].split()[-1]
log.info(f"get_hostname: hostname found: '{output}'")
return output
async def get_config(self, timeout=None):
"""
        Async method used to get the configuration of the device
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: int
:return: Configuration of the device
:rtype: str
"""
log.info('get_config')
if timeout is None:
timeout = self.timeout
output = await self.send_command(self.cmd_get_config, timeout=timeout)
return output
async def save_config(self):
"""
        Async method used to save the current configuration on the device
:return: Commands of the configuration saving process
:rtype: str
"""
log.info('save_config')
output = await self.send_command(self.cmd_save_config)
return output
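
# Configuration sketch (hypothetical commands; assumes an already connected
# device instance):
#
#     async def configure(device):
#         cmds = ['interface GigabitEthernet0/1',
#                 'description uplink',
#                 'no shutdown']
#         result = await device.send_config_set(cmds)
#         await device.save_config()
#         return result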
import asyncio, asyncssh, logging
log = logging.getLogger(__package__)
logging.basicConfig(level=logging.DEBUG)
asyncssh.set_debug_level(2)
MAX_BUFFER_DATA = 65535
ipv4_netmask_list = {'0.0.0.0': '0', '128.0.0.0': '1', '192.0.0.0': '2',
'224.0.0.0': '3', '240.0.0.0': '4', '248.0.0.0': '5', '252.0.0.0': '6',
'254.0.0.0': '7', '255.0.0.0': '8', '255.128.0.0': '9', '255.192.0.0':
'10', '255.224.0.0': '11', '255.240.0.0': '12', '255.248.0.0': '13',
'255.252.0.0': '14', '255.254.0.0': '15', '255.255.0.0': '16',
'255.255.128.0': '17', '255.255.192.0': '18', '255.255.224.0': '19',
'255.255.240.0': '20', '255.255.248.0': '21', '255.255.252.0': '22',
'255.255.254.0': '23', '255.255.255.0': '24', '255.255.255.128': '25',
'255.255.255.192': '26', '255.255.255.224': '27', '255.255.255.240':
'28', '255.255.255.248': '29', '255.255.255.252': '30',
'255.255.255.254': '31', '255.255.255.255': '32'}
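# The table above maps a dotted-decimal netmask to its CIDR prefix length,
# e.g. ipv4_netmask_list['255.255.255.0'] == '24'. A reverse lookup is a
# one-liner (sketch):
#
#     prefix_to_netmask = {v: k for k, v in ipv4_netmask_list.items()}
#     assert prefix_to_netmask['24'] == '255.255.255.0'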
class NetworkDevice:
"""
Base class for network object
:param ip: IP address of a device
:type ip: str
:param username: Username used to connect to a device
:type username: str
:param password: Password used to connect to a device
:type password: str
:param device_type: Type of device used
:type device_type: str
:param port: TCP port used to connect a device. Default value is "22" for SSH
:type port: int, optional
    :param timeout: Timeout (in seconds) used when connecting and sending commands to a device. Default value is 10 seconds
:type timeout: int, optional
:param _protocol: Protocol used to connect a device. "ssh" or "telnet" are possible options. Default value is "ssh"
:type _protocol: str, optional
:param enable_mode: Enable mode for devices requiring it. Default value is "False"
:type enable_mode: bool, optional
:param enable_password: Enable password used for enable mode.
:type enable_password: str, optional
:param conn: Variable used for the management of the SSH connection
:type conn: SSHClientConnection object
:param _writer: Variable used for the management of the Telnet connection and writing channel
:type _writer: StreamWriter object
:param _reader: Variable used for the management of the Telnet reading channel
:type _reader: StreamReader object
:param possible_prompts: Used by the connect method to list all possible prompts of the device
:type possible_prompts: list
:param _connect_first_ending_prompt: Default possible ending prompts. Used only once, just after login and password, to discover the prompt
:type _connect_first_ending_prompt: list
:param list_of_possible_ending_prompts: Different strings at the end of a prompt the device can get. Used for detecting the prompt returned in sent commands
:type list_of_possible_ending_prompts: list
:param _telnet_connect_login: Login prompt for Telnet. Used to detect when a login is expected or when login and password access has failed
:type _telnet_connect_login: str
:param _telnet_connect_password: Password prompt for Telnet. Used to detect when a login is expected or when login and password access has failed
:type _telnet_connect_password: str
:param _telnet_connect_authentication_fail_prompt: Known failing messages or prompts when an authentication has failed. Used to get an answer faster than timeout events
:type _telnet_connect_authentication_fail_prompt: list
:param cmd_enable: Enable command for entering into enable mode
:type cmd_enable: str
:param cmd_disable_paging: Command used to disable paging on a device. That command is run at connection time
:type cmd_disable_paging: str
:param cmd_enter_config_mode: Command used to enter into a configuration mode on a device when this device supports that feature.
:type cmd_enter_config_mode: str
:param cmd_exit_config_mode: Command used to leave a configuration mode on a device when this device supports that feature.
:type cmd_exit_config_mode: str
:param cmd_get_version: API command used to get the software version of a device
:type cmd_get_version: str
:param cmd_get_hostname: API command used to get the hostname of a device
:type cmd_get_hostname: str
:param cmd_get_model: API command used to get the model of a device
:type cmd_get_model: str
:param cmd_get_serial_number: API command used to get the serial number of a device
:type cmd_get_serial_number: str
:param cmd_get_config: API command used to get the running configuration of a device
:type cmd_get_config: str
:param cmd_save_config: API command used to save the running configuration on the device
:type cmd_save_config: str
"""
def __init__(self, **kwargs):
log.info('__init__')
self.ip = ''
self.username = ''
self.password = ''
self.device_type = ''
self.port = 22
self.timeout = 10
self._protocol = 'ssh'
self.enable_mode = False
self.enable_password = ''
self.conn = None
self._writer = None
self._reader = None
self.possible_prompts = []
self._connect_first_ending_prompt = ['#', '>']
self.list_of_possible_ending_prompts = ['(config-line)#',
'(config-if)#', '(config)#', '>', '#']
self._carriage_return_for_send_command = '\n'
self._send_command_error_in_returned_output = []
self._telnet_connect_login = 'Username:'
self._telnet_connect_password = 'Password:'
self._telnet_connect_authentication_fail_prompt = [':', '%']
self.cmd_enable = 'enable'
self.cmd_disable_paging = 'terminal length 0'
self.cmd_enter_config_mode = 'configure terminal'
self.cmd_exit_config_mode = 'exit'
self.cmd_get_version = 'show version'
self.cmd_get_hostname = 'show version | include uptime'
self.cmd_get_model = 'show inventory'
self.cmd_get_serial_number = 'show inventory | i SN'
self.cmd_get_config = 'show running-config'
self.cmd_save_config = 'write memory'
self.cmd_get_interfaces = [
'interface ethernet print terse without-paging',
'foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}'
, 'interface bridge port print terse without-paging']
self.cmd_set_interface = ['interface ethernet enable <INTERFACE>',
'interface ethernet disable <INTERFACE>',
'interface ethernet comment <INTERFACE> "<COMMENT>"',
'interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>',
'interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]'
]
self.cmd_get_mac_address_table = (
'interface bridge host print without-paging')
self.cmd_get_arp = 'ip arp print terse without-paging'
self.cmd_get_lldp_neighbors = 'ip neighbor print terse without-paging'
self.cmd_get_vlans = 'interface bridge vlan print terse without-paging'
self.cmd_add_vlan = (
'interface bridge vlan add vlan-ids=<VLAN> comment="<VLAN_NAME>" bridge=<BRIDGE>'
)
self.cmd_remove_vlan = (
'interface bridge vlan remove [find vlan-ids=<VLAN>]')
self.cmd_add_interface_to_vlan = ['interface bridge vlan print terse',
'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'
,
'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'
,
'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'
]
self.cmd_remove_interface_from_vlan = [
'interface bridge vlan print terse',
'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'
,
'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'
,
'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'
]
self.cmd_get_routing_table = 'ip route print without-paging terse'
self.cmd_get_interfaces_ip = 'ip address print terse without-paging'
self.cmd_add_static_route = (
'ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>'
)
self.cmd_remove_static_route = (
'ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]')
log.debug('__init__: kwargs: ' + str(kwargs))
if 'ip' in kwargs:
self.ip = kwargs['ip']
log.info('__init__: ip found: ' + str(self.ip))
if 'username' in kwargs:
self.username = kwargs['username']
log.info('__init__: username found: ' + str(self.username))
if 'password' in kwargs:
self.password = kwargs['password']
log.debug('__init__: password found: ' + str(self.password))
if 'device_type' in kwargs:
self.device_type = kwargs['device_type']
log.info('__init__: device_type found: ' + str(self.device_type))
if 'timeout' in kwargs:
self.timeout = kwargs['timeout']
log.info('__init__: timeout found: ' + str(self.timeout))
if 'protocol' in kwargs:
self._protocol = kwargs['protocol'].lower()
log.info('__init__: protocol found: ' + str(self._protocol))
if self._protocol.lower() == 'telnet':
self.port = 23
if 'port' in kwargs:
self.port = kwargs['port']
log.info('__init__: port found: ' + str(self.port))
if 'enable_mode' in kwargs:
self.enable_mode = kwargs['enable_mode']
log.info('__init__: enable_mode found: ' + str(self.enable_mode))
if 'enable_password' in kwargs:
self.enable_password = kwargs['enable_password']
log.info('__init__: enable_password found: ' + str(self.
enable_password))
async def __aenter__(self):
"""
Context manager opening connection
"""
try:
await self.connect()
except Exception:
await self.disconnect()
raise
return self
async def __aexit__(self, exc_type, exc_value, traceback):
"""
Context manager closing connection
"""
await self.disconnect()
def find_prompt(self, text):
"""
Method used to find a prompt inside an output string
This method is used during the first communication with the device.
First it finds the prompt, then calculates the different forms the prompt
can take. This will be useful later on while finding the prompt in other
output streams (read).
:param text: data with a prompt
:type text: str
:return: the prompt found
:rtype: str
"""
prompt = text.split('\n')[-1]
prompt = prompt.split('\r')[-1]
log.info(f"find_prompt: prompt: '{prompt}'")
self.possible_prompts = self.get_possible_prompts(prompt)
return prompt
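    # Hedged example (illustrative values): if the last chunk of data read
    # from the device ends with '...\r\nswitch#', find_prompt returns
    # 'switch#' and fills self.possible_prompts with variants such as
    # 'switch>', 'switch(config)#', etc., via get_possible_prompts below.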
def get_possible_prompts(self, prompt):
"""
Method used to check if a prompt has one of the expected endings then
create a list with all possible prompts for the device
:param prompt: a prompt with a possible ending prompt (eg. "switch#")
:type prompt: str
:return: the list of prompts
:rtype: list
"""
list_of_prompts = []
list_of_possible_ending_prompts = self.list_of_possible_ending_prompts
my_prompt = prompt
for ending in list_of_possible_ending_prompts:
if my_prompt.endswith(ending):
my_prompt = my_prompt[:-len(ending)]
break
log.info(f"get_possible_prompts: prompt found: '{my_prompt}'")
log.info(f"get_possible_prompts: prompt found size: '{len(my_prompt)}'"
)
for ending in list_of_possible_ending_prompts:
list_of_prompts.append(my_prompt + ending)
log.info(
f'get_possible_prompts: list of possible prompts: {list_of_prompts}'
)
return list_of_prompts
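    # Hedged example (illustrative): get_possible_prompts('switch#') strips
    # the known ending '#' and returns ['switch(config-line)#',
    # 'switch(config-if)#', 'switch(config)#', 'switch>', 'switch#'].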
def check_if_prompt_is_found(self, text):
"""
Method used to check if a prompt is detected inside a string
:param text: a string with prompt
:type text: str
:return: the prompt found
:rtype: str
"""
prompt_found = False
for prompt in self.possible_prompts:
log.info(f"check_if_prompt_is_found: prompt: '{prompt}'")
if prompt in text:
prompt_found = True
log.info(f"check_if_prompt_is_found: prompt found: '{prompt}'")
break
return prompt_found
def remove_command_in_output(self, text, cmd):
"""
Method removing the command at the beginning of a string
After sending commands, an "echo" of the command sent
is displayed in the output string. This method removes it.
:param text: the text with the command at the beginning
:type text: str
:param cmd: the command previously sent
:type cmd: str
:return: the output string without the command
:rtype: str
"""
log.info(f"remove_command_in_output: cmd = '{cmd}'")
log.info(
f"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'")
output = text.split(cmd + '\n')[-1]
log.info(f"remove_command_in_output: output = '{output}'")
return output
def remove_starting_carriage_return_in_output(self, text):
"""
Method removing the carriage return at the beginning of a string
:param text: the text with the command at the beginning
:type text: str
:return: the output string without the starting carriage return
:rtype: str
"""
log.info('remove_starting_carriage_return_in_output')
output = text.lstrip('\r\n\r')
log.info(
f"remove_starting_carriage_return_in_output: output = '{output}'")
return output
def remove_ending_prompt_in_output(self, text):
"""
Method removing the prompt at the end of a string
:param text: the text with a prompt at the beginning
:type text: str
:return: the output string without the ending prompt
:rtype: str
"""
log.info('remove_ending_prompt_in_output')
for prompt in self.possible_prompts:
log.info(f"remove_ending_prompt_in_output: prompt: '{prompt}'")
if prompt in text:
text = text[:-len(prompt)]
text = text.rstrip('\r\n')
break
log.info(
f"remove_ending_prompt_in_output: text without prompt:\n'{text}'")
return text
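    # Hedged example of the cleanup pipeline applied after each command
    # (illustrative values): for the raw output
    # 'show clock\n12:00:00\r\nswitch#',
    # remove_command_in_output() drops the echoed 'show clock',
    # remove_starting_carriage_return_in_output() strips the leading '\r\n',
    # and remove_ending_prompt_in_output() removes the trailing 'switch#',
    # leaving just '12:00:00'.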
def check_error_output(self, output):
"""
Check if an error is returned by the device ("% Unrecognized command", "% Ambiguous command", etc.)
If an error is found, then an exception is raised
"""
log.info('check_error_output')
if output:
log.info('check_error_output: output has some data')
for element in self._send_command_error_in_returned_output:
log.info(f'check_error_output: element: {element}')
log.info(f'check_error_output: output[0]: {output[0]}')
if output.startswith(element):
raise Exception(output)
def remove_ansi_escape_sequence(self, text):
"""
Method removing ANSI escape sequence from a string
Just CSI sequences are removed
:param text: the text with a prompt at the beginning
:type text: str
:return: the output string without the ending prompt
:rtype: str
"""
output = ''
esc_found = 0
for i in text:
if esc_found == 0:
if i == '\x1b':
log.info('Esc!')
esc_found = 1
else:
output += i
elif esc_found == 1:
if i == '[':
log.info('CSI sequence')
esc_found = 2
else:
output += '\x1b' + i
esc_found = 0
elif i >= 'a' and i <= 'z' or i >= 'A' and i <= 'Z':
log.info('End of escape sequence')
esc_found = 0
return output
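    # Hedged example (illustrative): CSI sequences such as '\x1b[2J' (clear
    # screen) are dropped while plain text is kept, so
    # remove_ansi_escape_sequence('\x1b[2Jswitch#') returns 'switch#'.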
async def disable_paging(self):
"""
Async method disabling paging on a device
Use the "cmd_disable_paging" attribute
"""
log.info('disable_paging')
await self.send_command(self.cmd_disable_paging)
async def connect(self):
"""
Async method used for connecting a device
Currently supported: SSH and Telnet
"""
log.info('connect')
try:
if self._protocol == 'ssh':
await self.connectSSH()
elif self._protocol == 'telnet':
await self.connectTelnet()
else:
raise Exception(
f'connect: unsupported protocol: {self._protocol}')
except Exception:
log.info('connect: connection error')
raise
async def connectSSH(self):
"""
Async method used for connecting a device using SSH protocol
"""
log.info('connectSSH')
generator = asyncssh.connect(self.ip, username=self.username,
password=self.password, known_hosts=None, encryption_algs=[algs
.decode('utf-8') for algs in asyncssh.encryption._enc_algs])
try:
self.conn = await asyncio.wait_for(generator, timeout=self.timeout)
except asyncio.exceptions.TimeoutError as error:
log.error(
f"connectSSH: connection failed: {self.ip} timeout: '{error}'")
raise asyncio.exceptions.TimeoutError(
'Connection failed: connection timed out.')
except Exception as error:
log.error(f"connectSSH: connection failed: {self.ip} '{error}'")
raise
log.info('connectSSH: connection success')
self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type
='netscud')
log.info('connectSSH: open_session success')
data = ''
prompt_not_found = True
try:
while prompt_not_found:
log.info('connectSSH: beginning of the loop')
data += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=self.timeout)
log.info(f"connectSSH: data: '{str(data)}'")
log.info(
f"connectSSH: data: hex:'{data.encode('utf-8').hex()}'")
for prompt in self._connect_first_ending_prompt:
if data.endswith(prompt):
log.info(
f"connectSSH: first ending prompt found: '{prompt}'"
)
prompt_not_found = False
break
log.info('connectSSH: end of loop')
except Exception as error:
log.error(
f"connectSSH: timeout while reading the prompt: {self.ip} '{error}'"
)
raise
log.info(f'connectSSH: end of prompt loop')
data = self.remove_ansi_escape_sequence(data)
self.prompt = self.find_prompt(str(data))
log.info(f"connectSSH: prompt found: '{self.prompt}'")
log.info(f"connectSSH: prompt found size: '{len(self.prompt)}'")
if self.cmd_disable_paging:
await self.disable_paging()
async def connectTelnet(self):
"""
Async method used for connecting a device using Telnet protocol
"""
log.info('connectTelnet')
try:
conn = asyncio.open_connection(self.ip, self.port)
except Exception as error:
log.error(
f"connectTelnet: preparation to the connection failed: '{error}'"
)
raise
log.info('connectTelnet: preparation to the connection success')
try:
self._reader, self._writer = await asyncio.wait_for(conn,
timeout=self.timeout)
except asyncio.TimeoutError:
log.error('connectTelnet: connection: timeout')
raise
log.info('connectTelnet: connection success')
prompt = self._telnet_connect_login
prompt_password = self._telnet_connect_password
use_login = True
output = ''
byte_data = b''
while True:
log.info(f'connectTelnet: read data for prompt')
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=self.timeout)
log.info(f'connectTelnet: byte_data: {byte_data}')
output = str(byte_data)
log.info(f'connectTelnet: output: {output}')
if prompt in output:
break
elif prompt_password in output:
use_login = False
break
log.info(f"connectTelnet: login prompt: '{output}'")
if use_login:
log.info('connectTelnet: sending login')
try:
await self.send_command(self.username, prompt_password)
log.info('connectTelnet: login sent')
except Exception:
raise
log.info('connectTelnet: sending password')
try:
output = await self.telnet_send_command_with_unexpected_pattern(
self.password, self._connect_first_ending_prompt, self.
_telnet_connect_authentication_fail_prompt)
except Exception:
raise
log.info('connectTelnet: password sent')
self.prompt = self.find_prompt(str(output))
log.info(f"connectTelnet: prompt found: '{self.prompt}'")
if self.enable_mode:
log.info('connectTelnet: enable mode to be activated')
try:
await self.send_command(self.cmd_enable, prompt_password)
log.info('connectTelnet: enable command sent')
log.info('connectTelnet: sending enable password')
await self.telnet_send_command_with_unexpected_pattern(self
.enable_password, self._connect_first_ending_prompt,
self._telnet_connect_authentication_fail_prompt)
log.info('connectTelnet: enable password sent')
except Exception:
log.info('connectTelnet: enable password failure')
raise
if self.cmd_disable_paging:
await self.disable_paging()
async def disconnect(self):
"""
Async method used to disconnect a device
If this method is not used then exceptions will happen
when the program ends
"""
log.info('disconnect')
if self._protocol == 'ssh':
await self.disconnectSSH()
elif self._protocol == 'telnet':
await self.disconnectTelnet()
else:
raise Exception(f'Unsupported protocol: {self._protocol}')
async def disconnectSSH(self):
"""
Async method used to disconnect a device in SSH
If this method is not used then exceptions will happen
when the program ends
"""
log.info('disconnectSSH')
if self.conn:
self.conn.close()
self.conn = None
async def disconnectTelnet(self):
"""
Async method used to disconnect a device in Telnet
If this method is not used then exceptions will happen
when the program ends
"""
log.info('disconnectTelnet')
if self._writer:
self._writer.close()
self._writer = None
async def send_command(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the output of command
:rtype: str
"""
log.info('send_command')
if timeout is None:
timeout = self.timeout
if self._protocol == 'ssh':
output = await self.send_commandSSH(cmd, pattern=pattern,
timeout=timeout)
elif self._protocol == 'telnet':
output = await self.send_commandTelnet(cmd, pattern=pattern,
timeout=timeout)
else:
raise Exception(
f'send_command: unsupported protocol: {self._protocol}')
return output
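    # Hedged usage sketch (illustrative; assumes an already connected
    # device object, and the pattern below is a made-up example of a
    # non-prompt reply to wait for):
    #
    # output = await device.send_command('show clock')
    # output = await device.send_command('copy running-config startup-config',
    #                                    pattern='[startup-config]')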
async def send_commandSSH(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the output of command
:rtype: str
"""
log.info('send_commandSSH')
if timeout is None:
timeout = self.timeout
log.info(f"send_commandSSH: cmd = '{cmd}'")
self.stdinx.write(cmd + self._carriage_return_for_send_command)
log.info('send_commandSSH: command sent')
output = ''
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
output = self.remove_ansi_escape_sequence(output)
output = output.replace('\r', '')
log.info(f"send_commandSSH: output: '{output}'")
if pattern:
if pattern in output:
break
elif self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_commandSSH: raw output: '{output}'
send_commandSSH: raw output (hex): '{output.encode().hex()}'"""
)
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_commandSSH: cleaned output: '{output}'
send_commandSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return output
async def send_commandTelnet(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the output of command
:rtype: str
"""
log.info('send_commandTelnet')
if timeout is None:
timeout = self.timeout
cmd = cmd + '\n'
self._writer.write(cmd.encode())
output = ''
byte_data = b''
try:
while True:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_commandTelnet: byte_data: '{byte_data}'")
output = str(byte_data)
log.info(f"send_commandTelnet: output: '{output}'")
if pattern:
if pattern in output:
break
elif self.check_if_prompt_is_found(output):
break
except asyncio.TimeoutError:
log.error('send_commandTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_commandTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_commandTelnet: raw output: '{output}'
send_commandTelnet: raw output (hex): '{output.encode().hex()}'"""
)
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_commandTelnet: cleaned output: '{output}'
send_commandTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return output
async def telnet_send_command_with_unexpected_pattern(self, cmd,
pattern, error_pattern=None, timeout=None):
"""
Async method used to send command for Telnet connection to a device with possible unexpected patterns
send_command can wait till timeout if login and password are wrong. This method
speeds up the returned error message when an authentication failure is identified.
This method is limited to authentication when a password is required
:param cmd: command to send
:type cmd: str
:param pattern: optional, a list of patterns located at the very end of a returned string. Can be used
to define a custom or unexpected prompt at the end of a string
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:param error_pattern: optional, a list of failed prompts found when the login and password are not correct
:type error_pattern: str
:return: the output of command
:rtype: str
"""
log.info('telnet_send_command_with_unexpected_pattern')
if timeout is None:
timeout = self.timeout
cmd = cmd + self._carriage_return_for_send_command
self._writer.write(cmd.encode())
output = ''
byte_data = b''
pattern_not_found = True
try:
while pattern_not_found:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(
f"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'"
)
log.debug(
f"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'"
)
output = str(byte_data)
log.info(
f"telnet_send_command_with_unexpected_pattern: output: '{output}'"
)
if pattern:
for prompt in pattern:
log.info(
f"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'"
)
if prompt in output:
pattern_not_found = False
log.info(
f"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'"
)
break
if error_pattern and pattern_not_found:
for bad_prompt in error_pattern:
log.info(
f"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'"
)
if bad_prompt in output:
log.error(
'telnet_send_command_with_unexpected_pattern: authentication failed'
)
raise Exception(
'telnet_send_command_with_unexpected_pattern: authentication failed'
)
except asyncio.TimeoutError:
await self.disconnect()
log.error(
'telnet_send_command_with_unexpected_pattern: reading prompt: timeout'
)
raise
except Exception as error:
await self.disconnect()
log.error(
f'telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}'
)
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""telnet_send_command_with_unexpected_pattern: raw output: '{output}'
telnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'"""
)
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'
telnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'"""
)
return output
async def send_config_set(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the results of the commands sent
:rtype: list of str
"""
log.info('send_config_set')
if timeout is None:
timeout = self.timeout
log.info('send_command')
if self._protocol == 'ssh':
output = await self.send_config_setSSH(cmds, timeout)
elif self._protocol == 'telnet':
output = await self.send_config_setTelnet(cmds, timeout)
else:
raise Exception(
f'send_config_set: unsupported protocol: {self._protocol}')
return output
async def send_config_setSSH(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the results of the commands sent
:rtype: list of str
"""
log.info('send_config_setSSH')
if timeout is None:
timeout = self.timeout
returned_output = ''
if isinstance(cmds, str):
cmds = [cmds]
elif not isinstance(cmds, list):
log.error(
'send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list'
)
return returned_output
log.info('send_config_set: entering configuration mode')
output = ''
cmd = self.cmd_enter_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setSSH: cmd = '{cmd}'")
self.stdinx.write(cmd)
log.info('send_config_setSSH: configuration mode entered')
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_config_setSSH: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_config_setSSH: raw output: '{output}'
send_config_setSSH: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setSSH: cleaned output: '{output}'
send_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setSSH: sending commands')
output = ''
for cmd in cmds:
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setSSH: cmd = '{cmd}'")
self.stdinx.write(cmd)
log.info('send_config_setSSH: command sent')
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_config_setSSH: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_config_setSSH: raw output: '{output}'
send_config_setSSH: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setSSH: cleaned output: '{output}'
send_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setSSH: leaving configuration mode')
output = ''
cmd = self.cmd_exit_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setSSH: cmd = '{cmd}'")
self.stdinx.write(cmd)
log.info('send_config_setSSH: command to leave configuration mode sent'
)
while True:
output += await asyncio.wait_for(self.stdoutx.read(
MAX_BUFFER_DATA), timeout=timeout)
log.info(f"send_config_setSSH: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
log.debug(
f"""send_config_setSSH: raw output: '{output}'
send_config_setSSH: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setSSH: cleaned output: '{output}'
send_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return returned_output
async def send_config_setTelnet(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the results of the commands sent
:rtype: list of str
"""
log.info('send_config_setTelnet')
if timeout is None:
timeout = self.timeout
returned_output = ''
if isinstance(cmds, str):
cmds = [cmds]
elif not isinstance(cmds, list):
log.error(
'send_config_setTelnet: parameter cmds used in send_config_set is neither a string nor a list'
)
return returned_output
log.info('send_config_setTelnet: entering configuration mode')
output = ''
cmd = self.cmd_enter_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
self._writer.write(cmd.encode())
log.info('send_config_setTelnet: configuration mode entered')
output = ''
byte_data = b''
try:
while True:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
output = str(byte_data)
log.info(f"send_config_setTelnet: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
except asyncio.TimeoutError:
log.error('send_config_setTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_config_setTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_config_setTelnet: raw output: '{output}'
send_config_setTelnet: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setTelnet: cleaned output: '{output}'
send_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setTelnet: sending commands')
output = ''
for cmd in cmds:
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
self._writer.write(cmd.encode())
log.info('send_config_setTelnet: command sent')
output = ''
byte_data = b''
try:
while True:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
output = str(byte_data)
log.info(f"send_config_setTelnet: output: '{output}'")
if self.check_if_prompt_is_found(output):
break
except asyncio.TimeoutError:
log.error('send_config_setTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_config_setTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_config_setTelnet: raw output: '{output}'
send_config_setTelnet: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setTelnet: cleaned output: '{output}'
send_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
log.info('send_config_setTelnet: leaving configuration mode')
output = ''
cmd = self.cmd_exit_config_mode
cmd = cmd + self._carriage_return_for_send_command
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
self._writer.write(cmd.encode())
log.info(
'send_config_setTelnet: command to leave configuration mode sent')
output = ''
byte_data = b''
loop = 3
try:
while loop:
byte_data += await asyncio.wait_for(self._reader.read(
MAX_BUFFER_DATA), timeout=timeout)
output = str(byte_data)
log.info(f"send_config_setTelnet: output: '{output}'")
await asyncio.sleep(0.5)
if self.check_if_prompt_is_found(output):
break
loop -= 1
except asyncio.TimeoutError:
log.error('send_config_setTelnet: connection: timeout')
raise
except Exception as error:
log.error(f'send_config_setTelnet: error: {error}')
raise
output = byte_data.decode('utf-8', 'ignore')
log.debug(
f"""send_config_setTelnet: raw output: '{output}'
send_config_setTelnet: raw output (hex): '{output.encode().hex()}'"""
)
returned_output += output
output = self.remove_command_in_output(output, str(cmd))
output = self.remove_starting_carriage_return_in_output(output)
output = self.remove_ending_prompt_in_output(output)
log.debug(
f"""send_config_setTelnet: cleaned output: '{output}'
send_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"""
)
self.check_error_output(output)
return returned_output
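    # Hedged usage sketch (illustrative; the commands are platform-specific
    # examples, not part of this module):
    #
    # cmds = ['interface GigabitEthernet0/1',
    #         'description uplink',
    #         'no shutdown']
    # output = await device.send_config_set(cmds)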
async def get_version(self):
"""
Async method used to get the version of the software of the device
:return: Version of the software of the device
:rtype: str
"""
log.info('get_version')
version = ''
output = await self.send_command(self.cmd_get_version)
version = output.split('Version ')[1].split(',')[0]
log.info(f'get_version: version: {version}')
return version
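    # Hedged parsing example (illustrative IOS banner): from
    # 'Cisco IOS Software, ... Version 15.2(4)M7, RELEASE SOFTWARE ...',
    # the split on 'Version ' and ',' above extracts '15.2(4)M7'.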
async def get_hostname(self):
"""
Async method used to get the name of the device
:return: Name of the device
:rtype: str
"""
log.info('get_hostname')
output = await self.send_command(self.cmd_get_hostname)
log.info(f"get_hostname: output: '{output}'")
output = output.split()[0]
log.info(f"get_hostname: hostname found: '{output}'")
return output
async def get_model(self):
"""
Async method used to get the model of the device
:return: Model of the device
:rtype: str
"""
log.info('get_model')
output = await self.send_command(self.cmd_get_model)
log.info(f"get_model: output: '{output}'")
output = output.split('"')[3]
log.info(f"get_model: model found: '{output}'")
return output
async def get_serial_number(self):
"""
Get serial number of the switch or the serial number of the first switch of a stack
:return: Serial number of the device
:rtype: str
"""
log.info('get_serial_number')
output = await self.send_command(self.cmd_get_serial_number)
log.info(f"get_serial_number: output: '{output}'")
output = output.splitlines()[0].split()[-1]
log.info(f"get_hostname: hostname found: '{output}'")
return output
async def get_config(self, timeout=None):
"""
Async method used to get the configuration of the device
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: Configuration of the device
:rtype: str
"""
log.info('get_config')
if timeout is None:
timeout = self.timeout
output = await self.send_command(self.cmd_get_config, timeout=timeout)
return output
async def save_config(self):
"""
Async method used to save the current configuration on the device
:return: Commands of the configuration saving process
:rtype: str
"""
log.info('save_config')
output = await self.send_command(self.cmd_save_config)
return output
# ---------------------------------------------------------------------------
# Python library import
import asyncio, asyncssh, logging
# Module logging logger
log = logging.getLogger(__package__)
# Debug level
# logging.basicConfig(level=logging.WARNING)
# logging.basicConfig(level=logging.INFO)
logging.basicConfig(level=logging.DEBUG)
asyncssh.set_debug_level(2)
# Declaration of constant values
# Max data to read in read function
MAX_BUFFER_DATA = 65535
# Dictionary with all netmasks of IPv4
ipv4_netmask_list = {
"0.0.0.0": "0",
"128.0.0.0": "1",
"192.0.0.0": "2",
"224.0.0.0": "3",
"240.0.0.0": "4",
"248.0.0.0": "5",
"252.0.0.0": "6",
"254.0.0.0": "7",
"255.0.0.0": "8",
"255.128.0.0": "9",
"255.192.0.0": "10",
"255.224.0.0": "11",
"255.240.0.0": "12",
"255.248.0.0": "13",
"255.252.0.0": "14",
"255.254.0.0": "15",
"255.255.0.0": "16",
"255.255.128.0": "17",
"255.255.192.0": "18",
"255.255.224.0": "19",
"255.255.240.0": "20",
"255.255.248.0": "21",
"255.255.252.0": "22",
"255.255.254.0": "23",
"255.255.255.0": "24",
"255.255.255.128": "25",
"255.255.255.192": "26",
"255.255.255.224": "27",
"255.255.255.240": "28",
"255.255.255.248": "29",
"255.255.255.252": "30",
"255.255.255.254": "31",
"255.255.255.255": "32",
}
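# Hedged sketch (assumption: the reverse mapping below is an illustrative
# helper, not part of the original module): the table can be inverted when
# a prefix length has to be turned back into a dotted-quad netmask.
#
# ipv4_prefix_to_netmask = {v: k for k, v in ipv4_netmask_list.items()}
# netmask = ipv4_prefix_to_netmask["24"]  # -> "255.255.255.0"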
class NetworkDevice:
"""
Base class for network object
:param ip: IP address of a device
:type ip: str
:param username: Username used to connect to a device
:type username: str
:param password: Password used to connect to a device
:type password: str
:param device_type: Type of device used
:type device_type: str
:param port: TCP port used to connect a device. Default value is "22" for SSH
:type port: int, optional
:param timeout: Timeout (in seconds) used when connecting a device. Default value is 10 seconds
:type timeout: int, optional
:param _protocol: Protocol used to connect a device. "ssh" or "telnet" are possible options. Default value is "ssh"
:type _protocol: str, optional
:param enable_mode: Enable mode for devices requiring it. Default value is "False"
:type enable_mode: bool, optional
:param enable_password: Enable password used for enable mode.
:type enable_password: str, optional
:param conn: Variable used for the management of the SSH connection
:type conn: SSHClientConnection object
:param _writer: Variable used for the management of the Telnet connection and writing channel
:type _writer: StreamWriter object
:param _reader: Variable used for the management of the Telnet reading channel
:type _reader: StreamReader object
:param possible_prompts: Used by the connect method to list all possible prompts of the device
:type possible_prompts: list
:param _connect_first_ending_prompt: Default possible ending prompts. Used only once, just after login and password, to discover the prompt
:type _connect_first_ending_prompt: list
:param list_of_possible_ending_prompts: Different strings at the end of a prompt the device can get. Used for detecting the prompt returned in sent commands
:type list_of_possible_ending_prompts: list
:param _telnet_connect_login: Login prompt for Telnet. Used to detect when a login is expected or when login and password access has failed
:type _telnet_connect_login: str
:param _telnet_connect_password: Password prompt for Telnet. Used to detect when a login is expected or when login and password access has failed
:type _telnet_connect_password: str
:param _telnet_connect_authentication_fail_prompt: Known failing messages or prompts when an authentication has failed. Used to get an answer faster than timeout events
:type _telnet_connect_authentication_fail_prompt: list
:param cmd_enable: Enable command for entering into enable mode
:type cmd_enable: str
:param cmd_disable_paging: Command used to disable paging on a device. That command is run at connection time
:type cmd_disable_paging: str
:param cmd_enter_config_mode: Command used to enter into a configuration mode on a device when this device supports that feature.
:type cmd_enter_config_mode: str
:param cmd_exit_config_mode: Command used to leave a configuration mode on a device when this device supports that feature.
:type cmd_exit_config_mode: str
:param cmd_get_version: API command used to get the software version of a device
:type cmd_get_version: str
:param cmd_get_hostname: API command used to get the hostname of a device
:type cmd_get_hostname: str
:param cmd_get_model: API command used to get the model of a device
:type cmd_get_model: str
:param cmd_get_serial_number: API command used to get the serial number of a device
:type cmd_get_serial_number: str
:param cmd_get_config: API command used to get the running configuration of a device
:type cmd_get_config: str
:param cmd_save_config: API command used to save the running configuration on the device
:type cmd_save_config: str
"""
def __init__(self, **kwargs):
# Display info message
log.info("__init__")
self.ip = ""
self.username = ""
self.password = ""
self.device_type = ""
self.port = 22
self.timeout = 10
self._protocol = "ssh"
self.enable_mode = False
self.enable_password = ""
self.conn = None
self._writer = None
self._reader = None
self.possible_prompts = []
self._connect_first_ending_prompt = ["#", ">"]
self.list_of_possible_ending_prompts = [
"(config-line)#",
"(config-if)#",
"(config)#",
">",
"#",
]
self._carriage_return_for_send_command = "\n"
self._send_command_error_in_returned_output = []
self._telnet_connect_login = "Username:"
self._telnet_connect_password = "Password:"
self._telnet_connect_authentication_fail_prompt = [":", "%"]
# General commands
self.cmd_enable = "enable"
self.cmd_disable_paging = "terminal length 0"
self.cmd_enter_config_mode = "configure terminal"
self.cmd_exit_config_mode = "exit"
self.cmd_get_version = "show version"
self.cmd_get_hostname = "show version | include uptime"
self.cmd_get_model = "show inventory"
self.cmd_get_serial_number = "show inventory | i SN"
self.cmd_get_config = "show running-config"
self.cmd_save_config = "write memory"
# Layer 1 commands
self.cmd_get_interfaces = [
"interface ethernet print terse without-paging",
"foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}",
"interface bridge port print terse without-paging",
]
self.cmd_set_interface = [
"interface ethernet enable <INTERFACE>",
"interface ethernet disable <INTERFACE>",
'interface ethernet comment <INTERFACE> "<COMMENT>"',
"interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>",
"interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]",
]
# Layer 2 commands
self.cmd_get_mac_address_table = "interface bridge host print without-paging"
self.cmd_get_arp = "ip arp print terse without-paging"
self.cmd_get_lldp_neighbors = "ip neighbor print terse without-paging"
self.cmd_get_vlans = "interface bridge vlan print terse without-paging"
self.cmd_add_vlan = 'interface bridge vlan add vlan-ids=<VLAN> comment="<VLAN_NAME>" bridge=<BRIDGE>'
self.cmd_remove_vlan = "interface bridge vlan remove [find vlan-ids=<VLAN>]"
self.cmd_add_interface_to_vlan = [
"interface bridge vlan print terse",
"interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>",
"interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>",
"interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>",
]
self.cmd_remove_interface_from_vlan = [
"interface bridge vlan print terse",
"interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>",
"interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>",
"interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>",
]
# Layer 3 commands
self.cmd_get_routing_table = "ip route print without-paging terse"
self.cmd_get_interfaces_ip = "ip address print terse without-paging"
self.cmd_add_static_route = "ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>"
self.cmd_remove_static_route = (
"ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]"
)
# Display info message
log.debug("__init__: kwargs: " + str(kwargs))
# Get information from dictionary
# "ip" found?
if "ip" in kwargs:
# Save "ip" parameter
self.ip = kwargs["ip"]
# Display info message
log.info("__init__: ip found: " + str(self.ip))
# "username" found?
if "username" in kwargs:
self.username = kwargs["username"]
# Display info message
log.info("__init__: username found: " + str(self.username))
# "password" found?
if "password" in kwargs:
self.password = kwargs["password"]
# Display info message
log.debug("__init__: password found: " + str(self.password))
# "device_type" found?
if "device_type" in kwargs:
self.device_type = kwargs["device_type"]
# Display info message
log.info("__init__: device_type found: " + str(self.device_type))
# "timeout" found?
if "timeout" in kwargs:
self.timeout = kwargs["timeout"]
# Display info message
log.info("__init__: timeout found: " + str(self.timeout))
# "protocol" found?
if "protocol" in kwargs:
self._protocol = kwargs["protocol"].lower()
# Display info message
log.info("__init__: protocol found: " + str(self._protocol))
# By default telnet port is 23
if self._protocol.lower() == "telnet":
self.port = 23
# "port" found?
if "port" in kwargs:
self.port = kwargs["port"]
# Display info message
log.info("__init__: port found: " + str(self.port))
# "enable_mode" found?
if "enable_mode" in kwargs:
self.enable_mode = kwargs["enable_mode"]
# Display info message
log.info("__init__: enable_mode found: " + str(self.enable_mode))
# "enable_password" found?
if "enable_password" in kwargs:
self.enable_password = kwargs["enable_password"]
# Display info message
log.info("__init__: enable_password found: " + str(self.enable_password))
async def __aenter__(self):
"""
Context manager opening connection
"""
try:
# Run an async method to connect a device
await self.connect()
except Exception:
# Disconnection (if needed) in case the connection is done but something failed
await self.disconnect()
# propagate exception if needed
raise
return self
# async def _aexit_(self, exc_type, exc_value, traceback):
async def __aexit__(self, exc_type, exc_value, traceback):
"""
Context manager closing connection
"""
# Close the connection
await self.disconnect()
def find_prompt(self, text):
"""
Method used to find a prompt inside an output string
This method is used during the first communication with the device.
First it finds the prompt, then calculates the different forms the prompt
can take. This will be useful later on while finding the prompt in other
output streams (read).
:param text: data with a prompt
:type text: str
:return: the prompt found
:rtype: str
"""
# Get last line of the data
prompt = text.split("\n")[-1]
# Remove possible \r in the data
# prompt = prompt.replace("\r", "")
prompt = text.split("\r")[-1]
# Display info message
log.info(f"find_prompt: prompt: '{prompt}'")
# Get the possible prompts for future recognition
self.possible_prompts = self.get_possible_prompts(prompt)
# Return the prompt
return prompt
def get_possible_prompts(self, prompt):
"""
Method used to check if a prompt has one of the expected endings then
create a list with all possible prompts for the device
:param prompt: a prompt with a possible ending prompt (eg. "switch#")
:type prompt: str
:return: the list of prompts
:rtype: list
"""
# By default no prompts are returned
list_of_prompts = []
# Get all the possible values of the endings of the prompt
list_of_possible_ending_prompts = self.list_of_possible_ending_prompts
# Temporary variable storing the prompt value
my_prompt = prompt
# Test each possible prompt ending (i.e '#', '>', "(config-if)#", "(config)#")
for ending in list_of_possible_ending_prompts:
# Is this current prompt ending at the end of the prompt?
if my_prompt.endswith(ending):
# Yes
# Then remove the ending
my_prompt = my_prompt[: -len(ending)]
# Break the loop
break
# Prompt has now been reduced from e.g. "switch#" to "switch"
# Display info message
log.info(f"get_possible_prompts: prompt found: '{my_prompt}'")
# Display info message
log.info(f"get_possible_prompts: prompt found size: '{len(my_prompt)}'")
# Now create all the possible prompts for that device
for ending in list_of_possible_ending_prompts:
# Save the prompt name with a possible ending in the list
list_of_prompts.append(my_prompt + ending)
# Display info message
log.info(f"get_possible_prompts: list of possible prompts: {list_of_prompts}")
# Return the list of prompts
return list_of_prompts
def check_if_prompt_is_found(self, text):
"""
Method used to check if a prompt is detected inside a string
:param text: a string with prompt
:type text: str
:return: the prompt found
:rtype: str
"""
# By default the prompt is not found
prompt_found = False
# Check all possible prompts
for prompt in self.possible_prompts:
# Display info message
log.info(f"check_if_prompt_is_found: prompt: '{prompt}'")
# Is this prompt present in the text?
if prompt in text:
# Yes
prompt_found = True
# Display info message
log.info(f"check_if_prompt_is_found: prompt found: '{prompt}'")
# Leave the for loop
break
# Return the prompt found
return prompt_found
def remove_command_in_output(self, text, cmd):
"""
Method removing the command at the beginning of a string
After sending commands, an "echo" of the command sent
is displayed in the output string. This method removes it.
:param text: the text with the command at the beginning
:type text: str
:param cmd: the command previously sent
:type cmd: str
:return: the output string without the command
:rtype: str
"""
# Display info message
log.info(f"remove_command_in_output: cmd = '{cmd}'")
# Display info message
log.info(f"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'")
# Remove the command from the beginning of the output
# output = text.lstrip(cmd + "\n")
output = text.split(cmd + "\n")[-1]
# Display info message
log.info(f"remove_command_in_output: output = '{output}'")
# Return the string without the command
return output
def remove_starting_carriage_return_in_output(self, text):
"""
Method removing the carriage return at the beginning of a string
:param text: the text with the command at the beginning
:type text: str
:return: the output string without the starting carriage return
:rtype: str
"""
# Display info message
log.info("remove_starting_carriage_return_in_output")
# Remove the carriage return at the beginning of the string
output = text.lstrip("\r\n\r")
# Display info message
log.info(f"remove_starting_carriage_return_in_output: output = '{output}'")
# Return the string without the starting carriage return
return output
def remove_ending_prompt_in_output(self, text):
"""
Method removing the prompt at the end of a string
:param text: the text with a prompt at the beginning
:type text: str
:return: the output string without the ending prompt
:rtype: str
"""
# Display info message
log.info("remove_ending_prompt_in_output")
# Check all possible prompts
for prompt in self.possible_prompts:
# Display info message
log.info(f"remove_ending_prompt_in_output: prompt: '{prompt}'")
# Prompt found in the text?
if prompt in text:
# Yes
# Then it is removed from the text
# text = text.rstrip(prompt)
text = text[: -len(prompt)]
# Remove also carriage return
text = text.rstrip("\r\n")
# Leave the loop
break
# output = text.rstrip("\r\n" + self.prompt)
# Display info message
log.info(f"remove_ending_prompt_in_output: text without prompt:\n'{text}'")
# Return the text without prompt at the end
return text
def check_error_output(self, output):
"""
Check if an error is returned by the device ("% Unrecognized command", "% Ambiguous command", etc.)
If an error is found, then an exception is raised
"""
# Display info message
log.info("check_error_output")
# Check if output has some data
if output:
# Yes
# Display info message
log.info("check_error_output: output has some data")
# Check all elements in the list of output
for element in self._send_command_error_in_returned_output:
# Display info message
log.info(f"check_error_output: element: {element}")
# Display info message
log.info(f"check_error_output: output[0]: {output[0]}")
# Check if the output starts with a string with an error message (like "% Invalid input detected at '^' marker.")
# Error message?
if output.startswith(element):
# Yes
# Raise an exception
raise Exception(output)
def remove_ansi_escape_sequence(self, text):
"""
Method removing ANSI escape sequence from a string
Just CSI sequences are removed
:param text: the text with a prompt at the beginning
:type text: str
:return: the output string without the ending prompt
:rtype: str
"""
# By default no string returned
output = ""
# By default no escape sequence found
esc_found = 0
# Read char by char a string
for i in text:
# Display char
# log.info(f"{str(i).encode('ascii')}")
# No escape previously found?
if esc_found == 0:
# No escape sequence currently found
# Escape?
if i == "\x1b":
# Yes
log.info("Esc!")
# Escape found
esc_found = 1
else:
# No
# Then the current char can be saved
output += i
# Escape previously found?
elif esc_found == 1:
# Yes
# Then check if this is a CSI sequence
if i == "[":
# Beginning of CSI sequence
log.info("CSI sequence")
# CSI sequence
esc_found = 2
else:
# Another Escape sequence
# Keep the escape sequence in the string
output += "\x1b" + i
# No escape sequence next
esc_found = 0
else:
# Char between 'a' and 'z' or 'A' and 'Z'?
if (i >= "a" and i <= "z") or (i >= "A" and i <= "Z"):
# Yes
# Then it is the end of CSI escape sequence
log.info("End of escape sequence")
# No escape sequence next
esc_found = 0
# Return a string without ANSI escape sequence
return output
async def disable_paging(self):
"""
Async method disabling paging on a device
Use the "cmd_disable_paging" attribute
"""
# Display info message
log.info("disable_paging")
# Send command to the device to disable paging
await self.send_command(self.cmd_disable_paging)
async def connect(self):
"""
Async method used for connecting a device
Currently supported: SSH and Telnet
"""
# Display info message
log.info("connect")
try:
# SSH?
if self._protocol == "ssh":
# Yes
# Then Connect using SSH
await self.connectSSH()
# Telnet?
elif self._protocol == "telnet":
# Yes
# Then Connect using Telnet
await self.connectTelnet()
else:
# Unsupported protocol
# Raise an exception
raise Exception(f"connect: unsupported protocol: {self._protocol}")
except Exception:
# There was a problem with a connection method
# Display info message
log.info("connect: connection error")
raise
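    # Hedged usage sketch (illustrative): passing protocol="telnet" at
    # construction time switches self.port to 23 and routes connect() to
    # connectTelnet(); the default protocol="ssh" uses connectSSH().
    #
    # device = NetworkDevice(ip="192.0.2.1", username="admin",
    #                        password="secret", protocol="telnet")
    # await device.connect()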
async def connectSSH(self):
"""
Async method used for connecting a device using SSH protocol
"""
# Display info message
log.info("connectSSH")
# Parameters of the connection
generator = asyncssh.connect(
self.ip,
username=self.username,
password=self.password,
known_hosts=None,
# encryption_algs="*", # Parameter that includes all encryption algorithms (even the old ones disabled by default)
encryption_algs=[
algs.decode("utf-8") for algs in asyncssh.encryption._enc_algs
], # Parameter that includes all encryption algorithms (even the old ones disabled by default)
)
# Trying to connect to the device
try:
self.conn = await asyncio.wait_for(generator, timeout=self.timeout)
except asyncio.exceptions.TimeoutError as error:
# Timeout
# Display error message
log.error(f"connectSSH: connection failed: {self.ip} timeout: '{error}'")
# Exception propagation
raise asyncio.exceptions.TimeoutError(
"Connection failed: connection timed out."
)
except Exception as error:
# Connection failed
# Display error message
log.error(f"connectSSH: connection failed: {self.ip} '{error}'")
# Exception propagation
raise
# Display info message
log.info("connectSSH: connection success")
# Create a session
self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type="netscud")
# Display info message
log.info("connectSSH: open_session success")
# By default no data has been read
data = ""
# By default no prompt found
prompt_not_found = True
try:
# Read data
while prompt_not_found:
# Display info message
log.info("connectSSH: beginning of the loop")
# Read the prompt
data += await asyncio.wait_for(
self.stdoutx.read(MAX_BUFFER_DATA), timeout=self.timeout
)
# Display info message
log.info(f"connectSSH: data: '{str(data)}'")
# Display info message
log.info(f"connectSSH: data: hex:'{data.encode('utf-8').hex()}'")
# Check if an initial prompt is found
for prompt in self._connect_first_ending_prompt:
# Ending prompt found?
if data.endswith(prompt):
# Yes
# Display info message
log.info(f"connectSSH: first ending prompt found: '{prompt}'")
# A ending prompt has been found
prompt_not_found = False
# Leave the loop
break
# Display info message
log.info("connectSSH: end of loop")
except Exception as error:
# Fail while reading the prompt
# Display error message
log.error(
f"connectSSH: timeout while reading the prompt: {self.ip} '{error}'"
)
# Exception propagation
raise
# Display info message
log.info(f"connectSSH: end of prompt loop")
# Remove possible escape sequence
data = self.remove_ansi_escape_sequence(data)
# Find prompt
self.prompt = self.find_prompt(str(data))
# Display info message
log.info(f"connectSSH: prompt found: '{self.prompt}'")
# Display info message
log.info(f"connectSSH: prompt found size: '{len(self.prompt)}'")
# Disable paging command available?
if self.cmd_disable_paging:
# Yes
# Disable paging
await self.disable_paging()
async def connectTelnet(self):
"""
Async method used for connecting a device using Telnet protocol
"""
# Display info message
log.info("connectTelnet")
try:
# Prepare connection with Telnet
conn = asyncio.open_connection(self.ip, self.port)
except Exception as error:
# Preparation to the connection failed
# Display error message
log.error(f"connectTelnet: preparation to the connection failed: '{error}'")
# Exception propagation
raise
# Display info message
log.info("connectTelnet: preparation to the connection success")
try:
# Connection with Telnet
self._reader, self._writer = await asyncio.wait_for(
conn, timeout=self.timeout
)
except asyncio.TimeoutError:
# Time out during connection
# Display error message
log.error("connectTelnet: connection: timeout")
# Exception propagation
raise
# Display info message
log.info("connectTelnet: connection success")
# Get prompt for the login
prompt = self._telnet_connect_login
# Get prompt for the password
prompt_password = self._telnet_connect_password
# By default a login is expected
use_login = True
# Temporary string variable
output = ""
# Temporary bytes variable
byte_data = b""
        # Read the Telnet banner and the first prompt (usually a login prompt, but a password prompt can appear, e.g. on IOS)
while True:
# Display info message
log.info(f"connectTelnet: read data for prompt")
# Read returned prompt
byte_data += await asyncio.wait_for(
self._reader.read(MAX_BUFFER_DATA), timeout=self.timeout
)
# Display info message
log.info(f"connectTelnet: byte_data: {byte_data}")
            # Temporary conversion to string. This string has the following form: "b'....'"
output = str(byte_data)
# Display info message
log.info(f"connectTelnet: output: {output}")
# Prompt for the username found?
if prompt in output:
# Yes
# Leave the loop
break
# Prompt for the password found?
elif prompt_password in output:
# Yes
# That means only password is required
use_login = False
# Leave the loop
break
# Display info message
log.info(f"connectTelnet: login prompt: '{output}'")
# Login to use?
if use_login:
# Yes
# Display info message
log.info("connectTelnet: sending login")
try:
# Send login
await self.send_command(self.username, prompt_password)
# Display info message
log.info("connectTelnet: login sent")
except Exception:
# Problem with the login
# Propagate the exception
raise
# Display info message
log.info("connectTelnet: sending password")
try:
# Send password
output = await self.telnet_send_command_with_unexpected_pattern(
self.password,
self._connect_first_ending_prompt,
self._telnet_connect_authentication_fail_prompt,
)
except Exception:
# Problem with the password
# Propagate the exception
raise
# Display info message
log.info("connectTelnet: password sent")
# Find prompt
self.prompt = self.find_prompt(str(output))
# Display info message
log.info(f"connectTelnet: prompt found: '{self.prompt}'")
# Password enable?
if self.enable_mode:
# Yes
# Display info message
log.info("connectTelnet: enable mode to be activated")
try:
# Send enable command
await self.send_command(self.cmd_enable, prompt_password)
# Display info message
log.info("connectTelnet: enable command sent")
# Display info message
log.info("connectTelnet: sending enable password")
# Send enable password
await self.telnet_send_command_with_unexpected_pattern(
self.enable_password,
self._connect_first_ending_prompt,
self._telnet_connect_authentication_fail_prompt,
)
# Display info message
log.info("connectTelnet: enable password sent")
except Exception:
# Problem with the enable password
# Display info message
log.info("connectTelnet: enable password failure")
# Propagate the exception
raise
# Disable paging command available?
if self.cmd_disable_paging:
# Yes
# Disable paging
await self.disable_paging()
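    # Usage sketch (hedged): Telnet is selected with protocol="telnet", which
    # also switches the default port to 23. Values are illustrative only.
    #
    #     device = NetworkDevice(ip="192.0.2.10", username="admin",
    #                            password="secret", protocol="telnet",
    #                            enable_mode=True, enable_password="secret2")
    #     await device.connect()  # dispatches to connectTelnet()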
async def disconnect(self):
"""
Async method used to disconnect a device
If this method is not used then exceptions will happen
        when the program ends
"""
# Debug info message
log.info("disconnect")
# SSH?
if self._protocol == "ssh":
# Yes
# Then disconnect using SSH
await self.disconnectSSH()
# Telnet?
elif self._protocol == "telnet":
# Yes
# Then disconnect using Telnet
await self.disconnectTelnet()
else:
# Unsupported protocol
# Raise an exception
raise Exception(f"Unsupported protocol: {self._protocol}")
async def disconnectSSH(self):
"""
Async method used to disconnect a device in SSH
If this method is not used then exceptions will happen
        when the program ends
"""
# Debug info message
log.info("disconnectSSH")
# Connection previously open in SSH?
if self.conn:
# Yes
# Then close the SSH connection
self.conn.close()
# No more connection to disconnect
self.conn = None
async def disconnectTelnet(self):
"""
Async method used to disconnect a device in Telnet
If this method is not used then exceptions will happen
        when the program ends
"""
# Debug info message
log.info("disconnectTelnet")
# Connection previously open in Telnet?
if self._writer:
# Yes
            # Then close the Telnet connection
self._writer.close()
# No more connection to disconnect
self._writer = None
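    # Lifecycle sketch (hedged): the async context manager on this class calls
    # connect() on entry and disconnect() on exit, so an explicit disconnect()
    # is only needed when connect() is called directly.
    #
    #     async with NetworkDevice(ip="192.0.2.10", username="admin",
    #                              password="secret") as device:
    #         ...  # send commands here; disconnect() runs automatically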
async def send_command(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: float
:return: the output of command
:rtype: str
"""
# Debug info message
log.info("send_command")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# SSH?
if self._protocol == "ssh":
# Yes
            # Then send the command using SSH
output = await self.send_commandSSH(cmd, pattern=pattern, timeout=timeout)
# Telnet?
elif self._protocol == "telnet":
# Yes
            # Then send the command using Telnet
output = await self.send_commandTelnet(
cmd, pattern=pattern, timeout=timeout
)
else:
# Unsupported protocol
# Raise an exception
raise Exception(f"send_command: unsupported protocol: {self._protocol}")
# Return the result of the command
return output
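    # Usage sketch (hedged): the command strings are device-specific examples,
    # not part of this class. The pattern argument is only needed when the
    # reply does not end with the detected prompt (e.g. a confirmation question).
    #
    #     output = await device.send_command("show version")
    #     output = await device.send_command("reload", pattern="[confirm]")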
async def send_commandSSH(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: float
:return: the output of command
:rtype: str
"""
# Debug info message
log.info("send_commandSSH")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Add carriage return at the end of the command (mandatory to send the command)
# cmd = cmd + "\n"
# cmd = cmd + "\r\n"
# Debug info message
log.info(f"send_commandSSH: cmd = '{cmd}'")
# Sending command
self.stdinx.write(cmd + self._carriage_return_for_send_command)
# Display message
log.info("send_commandSSH: command sent")
# Variable used to gather data
output = ""
# Reading data
while True:
# await asyncio.sleep(1)
# Read the data received
output += await asyncio.wait_for(
self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout
)
# Debug info message
# log.info(f"send_commandSSH: output hex: '{str(output).encode("utf-8").hex()}'")
# Remove ANSI escape sequence
output = self.remove_ansi_escape_sequence(output)
# Remove possible "\r"
output = output.replace("\r", "")
# data = ""
# for i in output:
# data += i.encode("utf-8").hex()
# print(data)
# Debug info message
log.info(f"send_commandSSH: output: '{output}'")
            # Is a pattern used?
if pattern:
# Use pattern instead of prompt
if pattern in output:
# Yes
# Leave the loop
break
else:
# Check if prompt is found
if self.check_if_prompt_is_found(output):
# Yes
# Leave the loop
break
# Debug info message
log.debug(
f"send_commandSSH: raw output: '{output}'\nsend_commandSSH: raw output (hex): '{output.encode().hex()}'"
)
# Remove the command sent from the result of the command
output = self.remove_command_in_output(output, str(cmd))
# Remove the carriage return of the output
output = self.remove_starting_carriage_return_in_output(output)
# Remove the ending prompt of the output
output = self.remove_ending_prompt_in_output(output)
# Debug info message
log.debug(
f"send_commandSSH: cleaned output: '{output}'\nsend_commandSSH: cleaned output (hex): '{output.encode().hex()}'"
)
# Check if there is an error in the output string (like "% Unrecognized command")
# and generate an exception if needed
self.check_error_output(output)
# Return the result of the command
return output
async def send_commandTelnet(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: float
:return: the output of command
:rtype: str
"""
# Debug info message
log.info("send_commandTelnet")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Add carriage return at the end of the command (mandatory to send the command)
cmd = cmd + "\n"
# Sending command
self._writer.write(cmd.encode())
# Temporary string variable
output = ""
# Temporary bytes variable
byte_data = b""
try:
# Read data
while True:
# Read returned prompt
byte_data += await asyncio.wait_for(
self._reader.read(MAX_BUFFER_DATA), timeout=timeout
)
# Display info message
log.info(f"send_commandTelnet: byte_data: '{byte_data}'")
                # Temporary conversion to string. This string has the following form: "b'....'"
output = str(byte_data)
# Display info message
log.info(f"send_commandTelnet: output: '{output}'")
                # Is a pattern used?
if pattern:
# Use pattern instead of prompt
if pattern in output:
# Yes
# Leave the loop
break
else:
# Check if prompt is found
if self.check_if_prompt_is_found(output):
# Yes
# Leave the loop
break
except asyncio.TimeoutError:
            # Timeout while reading the prompt
# Display error message
log.error("send_commandTelnet: connection: timeout")
# Exception propagation
raise
except Exception as error:
            # Error while reading the prompt
# Display error message
log.error(f"send_commandTelnet: error: {error}")
# Exception propagation
raise
# Convert data (bytes) into string
output = byte_data.decode("utf-8", "ignore")
# Debug info message
log.debug(
f"send_commandTelnet: raw output: '{output}'\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'"
)
# Remove the command sent from the result of the command
output = self.remove_command_in_output(output, str(cmd))
# Remove the carriage return of the output
output = self.remove_starting_carriage_return_in_output(output)
# Remove the ending prompt of the output
output = self.remove_ending_prompt_in_output(output)
# Debug info message
log.debug(
f"send_commandTelnet: cleaned output: '{output}'\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'"
)
# Check if there is an error in the output string (like "% Unrecognized command")
# and generate an exception if needed
self.check_error_output(output)
# Return the result of the command
return output
async def telnet_send_command_with_unexpected_pattern(
self, cmd, pattern, error_pattern=None, timeout=None
):
"""
        Async method used to send a command over a Telnet connection to a device with possible unexpected patterns
        send_command can wait until timeout if the login and password are wrong. This method
        speeds up the returned error message when an authentication failure is identified.
        This method is limited to authentication when a password is required
:param cmd: command to send
:type cmd: str
        :param pattern: optional, a list of patterns located at the very end of the returned string. Can be used
        to define a custom or unexpected prompt at the end of a string
        :type pattern: list of str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: float
:param error_pattern: optional, a list of failed prompts found when the login and password are not correct
        :type error_pattern: list of str
:return: the output of command
:rtype: str
"""
# Debug info message
log.info("telnet_send_command_with_unexpected_pattern")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Add carriage return at the end of the command (mandatory to send the command)
cmd = cmd + self._carriage_return_for_send_command
# Sending command
self._writer.write(cmd.encode())
# Temporary string variable
output = ""
# Temporary bytes variable
byte_data = b""
# By default pattern is not found
pattern_not_found = True
try:
# Read data
while pattern_not_found:
# Read returned prompt
byte_data += await asyncio.wait_for(
self._reader.read(MAX_BUFFER_DATA), timeout=timeout
)
# Display info message
log.info(
f"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'"
)
# Display debug message
log.debug(
f"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'"
)
                # Temporary conversion to string. This string has the following form: "b'....'"
output = str(byte_data)
# Display info message
log.info(
f"telnet_send_command_with_unexpected_pattern: output: '{output}'"
)
# Is a pattern used?
if pattern:
# Check all pattern of prompt in the output
for prompt in pattern:
# Display info message
log.info(
f"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'"
)
# A pattern found?
if prompt in output:
# Yes
# A pattern is found. The main loop can be stopped
pattern_not_found = False
# Display info message
log.info(
f"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'"
)
# Leave the loop
break
# Is an unexpected pattern used?
if error_pattern and pattern_not_found:
# Check all unexpected pattern of prompt in the output
for bad_prompt in error_pattern:
# Display info message
log.info(
f"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'"
)
# An error_pattern pattern found?
if bad_prompt in output:
# Yes
# Display error message
log.error(
"telnet_send_command_with_unexpected_pattern: authentication failed"
)
# Raise exception
raise Exception(
"telnet_send_command_with_unexpected_pattern: authentication failed"
)
# Leave the loop
# break
except asyncio.TimeoutError:
            # Timeout while reading the prompt
# Close the connection in order to not display RuntimeError
await self.disconnect()
# Display error message
log.error(
"telnet_send_command_with_unexpected_pattern: reading prompt: timeout"
)
# Exception propagation
raise
except Exception as error:
            # Error while reading the prompt
# Close the connection in order to not display RuntimeError
await self.disconnect()
# Display error message
log.error(
f"telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}"
)
# Exception propagation
raise
# Convert data (bytes) into string
output = byte_data.decode("utf-8", "ignore")
# Debug info message
log.debug(
f"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'"
)
# Remove the command sent from the result of the command
output = self.remove_command_in_output(output, str(cmd))
# Remove the carriage return of the output
output = self.remove_starting_carriage_return_in_output(output)
# Remove the ending prompt of the output
output = self.remove_ending_prompt_in_output(output)
# Debug info message
log.debug(
f"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'"
)
# Return the result of the command
return output
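    # Usage sketch (hedged): this method is used internally during Telnet
    # authentication; the error patterns short-circuit the timeout when the
    # credentials are rejected, mirroring the call made in connectTelnet():
    #
    #     output = await self.telnet_send_command_with_unexpected_pattern(
    #         self.password,
    #         self._connect_first_ending_prompt,
    #         self._telnet_connect_authentication_fail_prompt,
    #     )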
async def send_config_set(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
        The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: float
:return: the results of the commands sent
        :rtype: str
"""
# Display info message
log.info("send_config_set")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# SSH?
if self._protocol == "ssh":
# Yes
            # Then send the commands using SSH
output = await self.send_config_setSSH(cmds, timeout)
# Telnet?
elif self._protocol == "telnet":
# Yes
            # Then send the commands using Telnet
output = await self.send_config_setTelnet(cmds, timeout)
else:
# Unsupported protocol
# Raise an exception
raise Exception(f"send_config_set: unsupported protocol: {self._protocol}")
# Return the result of the commands
return output
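    # Usage sketch (hedged): accepts a single string or a list of strings; the
    # commands below are illustrative. The concatenated output of the enter /
    # commands / exit sequence is returned.
    #
    #     output = await device.send_config_set([
    #         "interface FastEthernet0/1",
    #         "description LAN",
    #     ])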
async def send_config_setSSH(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
        The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: float
:return: the results of the commands sent
        :rtype: str
"""
# Display info message
log.info("send_config_setSSH")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Clear returned output
returned_output = ""
# Check if cmds is a string
if isinstance(cmds, str):
# A string
# Convert the string into a list
cmds = [cmds]
# A list?
elif not isinstance(cmds, list):
# Not a list (and not a string)
# Display error message
log.error(
"send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list"
)
# Leave the method
return returned_output
##############################
# Entering configuration mode
##############################
# Display info message
log.info("send_config_set: entering configuration mode")
# Clear output
output = ""
        # Get command for entering config mode
cmd = self.cmd_enter_config_mode
# Add carriage return at the end of the command (mandatory to send the command)
cmd = cmd + self._carriage_return_for_send_command
# Display info message
log.info(f"send_config_setSSH: cmd = '{cmd}'")
# Sending command
self.stdinx.write(cmd)
# Display message
log.info("send_config_setSSH: configuration mode entered")
while True:
# Read the data received
output += await asyncio.wait_for(
self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout
)
# Display info message
log.info(f"send_config_setSSH: output: '{output}'")
# Check if prompt is found
if self.check_if_prompt_is_found(output):
# Yes
# Leave the loop
break
# Debug info message
log.debug(
f"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'"
)
# Add the output to the returned output
returned_output += output
# Remove the command sent from the result of the command
output = self.remove_command_in_output(output, str(cmd))
# Remove the carriage return of the output
output = self.remove_starting_carriage_return_in_output(output)
# Remove the ending prompt of the output
output = self.remove_ending_prompt_in_output(output)
# Display info message
log.debug(
f"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"
)
# Check if there is an error in the output string (like "% Unrecognized command")
# and generate an exception if needed
self.check_error_output(output)
##############################
# Sending commands
##############################
# Display info message
log.info("send_config_setSSH: sending commands")
# Clear output
output = ""
# Each command
for cmd in cmds:
# Add carriage return at the end of the command (mandatory to send the command)
cmd = cmd + self._carriage_return_for_send_command
# Display info message
log.info(f"send_config_setSSH: cmd = '{cmd}'")
# Sending command
self.stdinx.write(cmd)
# Display info message
log.info("send_config_setSSH: command sent")
while True:
# Read the data received
output += await asyncio.wait_for(
self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout
)
# Display info message
log.info(f"send_config_setSSH: output: '{output}'")
# Check if prompt is found
if self.check_if_prompt_is_found(output):
# Yes
# Leave the loop
break
# Debug info message
log.debug(
f"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'"
)
# Add the output to the returned output
returned_output += output
# Remove the command sent from the result of the command
output = self.remove_command_in_output(output, str(cmd))
# Remove the carriage return of the output
output = self.remove_starting_carriage_return_in_output(output)
# Remove the ending prompt of the output
output = self.remove_ending_prompt_in_output(output)
# Display info message
log.debug(
f"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"
)
# Check if there is an error in the output string (like "% Unrecognized command")
# and generate an exception if needed
self.check_error_output(output)
##############################
# Leaving configuration mode
##############################
# Display info message
log.info("send_config_setSSH: leaving configuration mode")
# Clear output
output = ""
        # Get command to leave config mode
cmd = self.cmd_exit_config_mode
# Add carriage return at the end of the command (mandatory to send the command)
cmd = cmd + self._carriage_return_for_send_command
# Display info message
log.info(f"send_config_setSSH: cmd = '{cmd}'")
# Sending command
self.stdinx.write(cmd)
# Display info message
log.info("send_config_setSSH: command to leave configuration mode sent")
while True:
# Read the data received
output += await asyncio.wait_for(
self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout
)
# Display info message
log.info(f"send_config_setSSH: output: '{output}'")
# Check if prompt is found
if self.check_if_prompt_is_found(output):
# Yes
# Leave the loop
break
# Debug info message
log.debug(
f"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'"
)
# Add the output to the returned output
returned_output += output
# Remove the command sent from the result of the command
output = self.remove_command_in_output(output, str(cmd))
# Remove the carriage return of the output
output = self.remove_starting_carriage_return_in_output(output)
# Remove the ending prompt of the output
output = self.remove_ending_prompt_in_output(output)
# Display info message
log.debug(
f"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"
)
# Check if there is an error in the output string (like "% Unrecognized command")
# and generate an exception if needed
self.check_error_output(output)
# Return the result of the commands
return returned_output
async def send_config_setTelnet(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
        The commands sent can be either a string or a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: float
:return: the results of the commands sent
        :rtype: str
"""
# Display info message
log.info("send_config_setTelnet")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Clear returned output
returned_output = ""
# Check if cmds is a string
if isinstance(cmds, str):
# A string
# Convert the string into a list
cmds = [cmds]
# A list?
elif not isinstance(cmds, list):
# Not a list (and not a string)
# Display error message
log.error(
"send_config_setTelnet: parameter cmds used in send_config_set is neither a string or a list"
)
# Leave the method
return returned_output
##############################
# Entering configuration mode
##############################
# Display info message
log.info("send_config_setTelnet: entering configuration mode")
# Clear output
output = ""
        # Get command for entering config mode
cmd = self.cmd_enter_config_mode
# Add carriage return at the end of the command (mandatory to send the command)
cmd = cmd + self._carriage_return_for_send_command
# Display info message
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
# Sending command
self._writer.write(cmd.encode())
# Display message
log.info("send_config_setTelnet: configuration mode entered")
# Temporary string variable
output = ""
# Temporary bytes variable
byte_data = b""
try:
# Read data
while True:
# Read the data received
byte_data += await asyncio.wait_for(
self._reader.read(MAX_BUFFER_DATA), timeout=timeout
)
                # Temporary conversion to string. This string has the following form: "b'....'"
output = str(byte_data)
# Display info message
log.info(f"send_config_setTelnet: output: '{output}'")
# Check if prompt is found
if self.check_if_prompt_is_found(output):
# Yes
# Leave the loop
break
except asyncio.TimeoutError:
            # Timeout while reading the prompt
# Display error message
log.error("send_config_setTelnet: connection: timeout")
# Exception propagation
raise
except Exception as error:
            # Error while reading the prompt
# Display error message
log.error(f"send_config_setTelnet: error: {error}")
# Exception propagation
raise
# Convert data (bytes) into string
output = byte_data.decode("utf-8", "ignore")
# Debug info message
log.debug(
f"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'"
)
# Add the output to the returned output
returned_output += output
# Remove the command sent from the result of the command
output = self.remove_command_in_output(output, str(cmd))
# Remove the carriage return of the output
output = self.remove_starting_carriage_return_in_output(output)
# Remove the ending prompt of the output
output = self.remove_ending_prompt_in_output(output)
# Display info message
log.debug(
f"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"
)
# Check if there is an error in the output string (like "% Unrecognized command")
# and generate an exception if needed
self.check_error_output(output)
##############################
# Sending commands
##############################
# Display info message
log.info("send_config_setTelnet: sending commands")
# Clear output
output = ""
# Each command
for cmd in cmds:
# Add carriage return at the end of the command (mandatory to send the command)
cmd = cmd + self._carriage_return_for_send_command
# Display info message
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
# Sending command
self._writer.write(cmd.encode())
# Display info message
log.info("send_config_setTelnet: command sent")
# Temporary string variable
output = ""
# Temporary bytes variable
byte_data = b""
try:
# Read data
while True:
# Read the data received
byte_data += await asyncio.wait_for(
self._reader.read(MAX_BUFFER_DATA), timeout=timeout
)
                    # Temporary conversion to string. This string has the following form: "b'....'"
output = str(byte_data)
# Display info message
log.info(f"send_config_setTelnet: output: '{output}'")
# Check if prompt is found
if self.check_if_prompt_is_found(output):
# Yes
# Leave the loop
break
except asyncio.TimeoutError:
                # Timeout while reading the prompt
# Display error message
log.error("send_config_setTelnet: connection: timeout")
# Exception propagation
raise
except Exception as error:
                # Error while reading the prompt
# Display error message
log.error(f"send_config_setTelnet: error: {error}")
# Exception propagation
raise
# Convert data (bytes) into string
output = byte_data.decode("utf-8", "ignore")
# Debug info message
log.debug(
f"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'"
)
# Add the output to the returned output
returned_output += output
# Remove the command sent from the result of the command
output = self.remove_command_in_output(output, str(cmd))
# Remove the carriage return of the output
output = self.remove_starting_carriage_return_in_output(output)
# Remove the ending prompt of the output
output = self.remove_ending_prompt_in_output(output)
# Display info message
log.debug(
f"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"
)
# Check if there is an error in the output string (like "% Unrecognized command")
# and generate an exception if needed
self.check_error_output(output)
##############################
# Leaving configuration mode
##############################
# Display info message
log.info("send_config_setTelnet: leaving configuration mode")
# Clear output
output = ""
        # Get command to leave config mode
cmd = self.cmd_exit_config_mode
# Add carriage return at the end of the command (mandatory to send the command)
cmd = cmd + self._carriage_return_for_send_command
# Display info message
log.info(f"send_config_setTelnet: cmd = '{cmd}'")
# Sending command
self._writer.write(cmd.encode())
# Display info message
log.info("send_config_setTelnet: command to leave configuration mode sent")
# Temporary string variable
output = ""
# Temporary bytes variable
byte_data = b""
# Protection against infinite loop
loop = 3
try:
# Read data
while loop:
# Read the data received
byte_data += await asyncio.wait_for(
self._reader.read(MAX_BUFFER_DATA), timeout=timeout
)
                # Temporary conversion to string. This string has the following form: "b'....'"
output = str(byte_data)
# Display info message
log.info(f"send_config_setTelnet: output: '{output}'")
await asyncio.sleep(0.5)
# Check if prompt is found
if self.check_if_prompt_is_found(output):
# Yes
# Leave the loop
break
# Protection for "exit" command infinite loop in Cisco when enable is not activated
loop -= 1
except asyncio.TimeoutError:
            # Timeout while reading the prompt
# Display error message
log.error("send_config_setTelnet: connection: timeout")
# Exception propagation
raise
except Exception as error:
            # Error while reading the prompt
# Display error message
log.error(f"send_config_setTelnet: error: {error}")
# Exception propagation
raise
# Convert data (bytes) into string
output = byte_data.decode("utf-8", "ignore")
# Debug info message
log.debug(
f"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'"
)
# Add the output to the returned output
returned_output += output
# Remove the command sent from the result of the command
output = self.remove_command_in_output(output, str(cmd))
# Remove the carriage return of the output
output = self.remove_starting_carriage_return_in_output(output)
# Remove the ending prompt of the output
output = self.remove_ending_prompt_in_output(output)
# Display info message
log.debug(
f"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"
)
# Check if there is an error in the output string (like "% Unrecognized command")
# and generate an exception if needed
self.check_error_output(output)
# Return the result of the commands
return returned_output
#########################################################
#
# List of API
#
#########################################################
async def get_version(self):
"""
        Async method used to get the version of the software of the device
:return: Version of the software of the device
:rtype: str
"""
# Display info message
log.info("get_version")
# By default empty string
version = ""
# Run get version on the device
output = await self.send_command(self.cmd_get_version)
# Seek "Version " and "," to get the version in the returned output
version = output.split("Version ")[1].split(",")[0]
# Display info message
log.info(f"get_version: version: {version}")
# Return the version of the software of the device
return version
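    # Usage sketch (hedged): returns only the version substring parsed between
    # "Version " and the next comma, e.g. "15.2(4)M7" on IOS-like output.
    #
    #     version = await device.get_version()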
async def get_hostname(self):
"""
        Async method used to get the name of the device
:return: Name of the device
:rtype: str
"""
# Display info message
log.info("get_hostname")
# Get hostname
output = await self.send_command(self.cmd_get_hostname)
# Display info message
log.info(f"get_hostname: output: '{output}'")
# Remove the useless information in the returned string
output = output.split()[0]
# Display info message
log.info(f"get_hostname: hostname found: '{output}'")
# Return the name of the device
return output
async def get_model(self):
"""
        Async method used to get the model of the device
:return: Model of the device
:rtype: str
"""
# Display info message
log.info("get_model")
# Get model
output = await self.send_command(self.cmd_get_model)
# Display info message
log.info(f"get_model: output: '{output}'")
# Remove the useless information in the returned string
output = output.split('"')[3]
# Display info message
log.info(f"get_model: model found: '{output}'")
# Return the model of the device
return output
async def get_serial_number(self):
"""
        Async method used to get the serial number of the switch, or the serial number of the first switch of a stack
:return: Serial number of the device
:rtype: str
"""
# Display info message
log.info("get_serial_number")
# Get serial number
output = await self.send_command(self.cmd_get_serial_number)
# Display info message
log.info(f"get_serial_number: output: '{output}'")
# Remove the useless information in the returned string
output = output.splitlines()[0].split()[-1]
# Display info message
log.info(f"get_hostname: hostname found: '{output}'")
# Return the serial number of the device
return output
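    # Usage sketch (hedged): on a stacked switch, only the first unit's serial
    # number is returned by this parsing.
    #
    #     serial = await device.get_serial_number()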
async def get_config(self, timeout=None):
"""
        Async method used to get the configuration of the device
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: float
:return: Configuration of the device
:rtype: str
"""
# Display info message
log.info("get_config")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Get config
output = await self.send_command(self.cmd_get_config, timeout=timeout)
        # Return the configuration of the device
return output
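    # Usage sketch (hedged): a larger timeout can help on devices with long
    # configurations; 60 is an illustrative value, not a recommendation.
    #
    #     config = await device.get_config(timeout=60)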
async def save_config(self):
"""
        Async method used to save the current configuration on the device
:return: Commands of the configuration saving process
:rtype: str
"""
# Display info message
log.info("save_config")
# Send command
output = await self.send_command(self.cmd_save_config)
# Return the commands of the configuration saving process
return output
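    # End-to-end sketch (hedged): ties the API above together. The address,
    # credentials and commands are placeholders; adjust for a real device.
    #
    #     import asyncio
    #
    #     async def main():
    #         async with NetworkDevice(ip="192.0.2.10", username="admin",
    #                                  password="secret", protocol="ssh") as device:
    #             print(await device.get_hostname())
    #             print(await device.get_version())
    #             await device.send_config_set(["hostname LAB-SW1"])
    #             await device.save_config()
    #
    #     asyncio.run(main())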
cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command to leave configuration mode sent'\n )\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def send_config_setTelnet(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands sent can be either a string or a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setTelnet')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setTelnet: parameter cmds used in send_config_set is neither a string nor a list'\n )\n return returned_output\n log.info('send_config_setTelnet: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: configuration mode entered')\n output = ''\n byte_data = b''\n try:\n 
while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info(\n 'send_config_setTelnet: command to leave configuration mode sent')\n output = ''\n byte_data = b''\n loop = 3\n try:\n while loop:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n await asyncio.sleep(0.5)\n if self.check_if_prompt_is_found(output):\n break\n loop -= 1\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def get_version(self):\n \"\"\"\n Async method used to get the version of the software of the device\n\n :return: Version of the software of the device\n :rtype: str\n \"\"\"\n log.info('get_version')\n version = ''\n output = await self.send_command(self.cmd_get_version)\n version = output.split('Version ')[1].split(',')[0]\n log.info(f'get_version: version: {version}')\n return version\n\n async def get_hostname(self):\n \"\"\"\n Async method used to get the name of the device\n\n :return: Name of the device\n :rtype: str\n \"\"\"\n log.info('get_hostname')\n output = await self.send_command(self.cmd_get_hostname)\n log.info(f\"get_hostname: output: '{output}'\")\n output = output.split()[0]\n log.info(f\"get_hostname: hostname found: '{output}'\")\n return output\n\n async def get_model(self):\n \"\"\"\n Async method used to get the model of the device\n\n :return: Model of the device\n :rtype: str\n \"\"\"\n log.info('get_model')\n output = await self.send_command(self.cmd_get_model)\n log.info(f\"get_model: output: '{output}'\")\n 
output = output.split('\"')[3]\n log.info(f\"get_model: model found: '{output}'\")\n return output\n\n async def get_serial_number(self):\n \"\"\"\n Get serial number of the switch or the serial number of the first switch of a stack\n\n :return: Serial number of the device\n :rtype: str\n \"\"\"\n log.info('get_serial_number')\n output = await self.send_command(self.cmd_get_serial_number)\n log.info(f\"get_serial_number: output: '{output}'\")\n output = output.splitlines()[0].split()[-1]\n log.info(f\"get_serial_number: serial number found: '{output}'\")\n return output\n\n async def get_config(self, timeout=None):\n \"\"\"\n Async method used to get the configuration of the device\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: Configuration of the device\n :rtype: str\n \"\"\"\n log.info('get_config')\n if timeout is None:\n timeout = self.timeout\n output = await self.send_command(self.cmd_get_config, timeout=timeout)\n return output\n\n async def save_config(self):\n \"\"\"\n Async method used to save the current configuration on the device\n\n :return: Commands of the configuration saving process\n :rtype: str\n \"\"\"\n log.info('save_config')\n output = await self.send_command(self.cmd_save_config)\n return output\n",
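The class above is designed to be driven as an async context manager: __aenter__ calls connect() and __aexit__ guarantees disconnect(). A minimal usage sketch follows, assuming the class is importable; the module name my_device_module is hypothetical, and the IP address, credentials, and configuration commands are placeholders for illustration only.

import asyncio

from my_device_module import NetworkDevice  # hypothetical import path

async def main():
    # Placeholder address and credentials, not real values
    async with NetworkDevice(ip='192.0.2.1', username='admin',
                             password='secret', protocol='ssh') as device:
        # Helpers built on send_command, as defined above
        print(await device.get_hostname())
        print(await device.get_version())
        # send_config_set accepts a single string or a list of commands
        await device.send_config_set(['interface GigabitEthernet0/1',
                                      'description uplink'])
        await device.save_config()

asyncio.run(main())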
"step-2": "<mask token>\n\n\nclass NetworkDevice:\n <mask token>\n\n def __init__(self, **kwargs):\n log.info('__init__')\n self.ip = ''\n self.username = ''\n self.password = ''\n self.device_type = ''\n self.port = 22\n self.timeout = 10\n self._protocol = 'ssh'\n self.enable_mode = False\n self.enable_password = ''\n self.conn = None\n self._writer = None\n self._reader = None\n self.possible_prompts = []\n self._connect_first_ending_prompt = ['#', '>']\n self.list_of_possible_ending_prompts = ['(config-line)#',\n '(config-if)#', '(config)#', '>', '#']\n self._carriage_return_for_send_command = '\\n'\n self._send_command_error_in_returned_output = []\n self._telnet_connect_login = 'Username:'\n self._telnet_connect_password = 'Password:'\n self._telnet_connect_authentication_fail_prompt = [':', '%']\n self.cmd_enable = 'enable'\n self.cmd_disable_paging = 'terminal length 0'\n self.cmd_enter_config_mode = 'configure terminal'\n self.cmd_exit_config_mode = 'exit'\n self.cmd_get_version = 'show version'\n self.cmd_get_hostname = 'show version | include uptime'\n self.cmd_get_model = 'show inventory'\n self.cmd_get_serial_number = 'show inventory | i SN'\n self.cmd_get_config = 'show running-config'\n self.cmd_save_config = 'write memory'\n self.cmd_get_interfaces = [\n 'interface ethernet print terse without-paging',\n 'foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}'\n , 'interface bridge port print terse without-paging']\n self.cmd_set_interface = ['interface ethernet enable <INTERFACE>',\n 'interface ethernet disable <INTERFACE>',\n 'interface ethernet comment <INTERFACE> \"<COMMENT>\"',\n 'interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>',\n 'interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]'\n ]\n self.cmd_get_mac_address_table = (\n 'interface bridge host print without-paging')\n self.cmd_get_arp = 'ip arp print terse without-paging'\n self.cmd_get_lldp_neighbors = 'ip neighbor print terse without-paging'\n self.cmd_get_vlans = 'interface bridge vlan print terse without-paging'\n self.cmd_add_vlan = (\n 'interface bridge vlan add vlan-ids=<VLAN> comment=\"<VLAN_NAME>\" bridge=<BRIDGE>'\n )\n self.cmd_remove_vlan = (\n 'interface bridge vlan remove [find vlan-ids=<VLAN>]')\n self.cmd_add_interface_to_vlan = ['interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_remove_interface_from_vlan = [\n 'interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_get_routing_table = 'ip route print without-paging terse'\n self.cmd_get_interfaces_ip = 'ip address print terse without-paging'\n self.cmd_add_static_route = (\n 'ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>'\n )\n self.cmd_remove_static_route = (\n 'ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]')\n log.debug('__init__: kwargs: ' + str(kwargs))\n if 'ip' in kwargs:\n self.ip = kwargs['ip']\n log.info('__init__: ip found: ' + str(self.ip))\n if 'username' in kwargs:\n self.username = kwargs['username']\n log.info('__init__: 
username found: ' + str(self.username))\n if 'password' in kwargs:\n self.password = kwargs['password']\n log.debug('__init__: password found: ' + str(self.password))\n if 'device_type' in kwargs:\n self.device_type = kwargs['device_type']\n log.info('__init__: device_type found: ' + str(self.device_type))\n if 'timeout' in kwargs:\n self.timeout = kwargs['timeout']\n log.info('__init__: timeout found: ' + str(self.timeout))\n if 'protocol' in kwargs:\n self._protocol = kwargs['protocol'].lower()\n log.info('__init__: protocol found: ' + str(self._protocol))\n if self._protocol.lower() == 'telnet':\n self.port = 23\n if 'port' in kwargs:\n self.port = kwargs['port']\n log.info('__init__: port found: ' + str(self.port))\n if 'enable_mode' in kwargs:\n self.enable_mode = kwargs['enable_mode']\n log.info('__init__: enable_mode found: ' + str(self.enable_mode))\n if 'enable_password' in kwargs:\n self.enable_password = kwargs['enable_password']\n log.info('__init__: enable_password found: ' + str(self.\n enable_password))\n\n async def __aenter__(self):\n \"\"\"\n Context manager opening connection\n \"\"\"\n try:\n await self.connect()\n except Exception:\n await self.disconnect()\n raise\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Context manager closing connection\n \"\"\"\n await self.disconnect()\n\n def find_prompt(self, text):\n \"\"\"\n Method used to find a prompt inside an output string\n\n This method is used during the first communication with the device.\n First it finds the prompt, then it calculates the different forms the prompt\n can take. This will be useful later on when finding the prompt in other\n output streams (read).\n\n :param text: data with a prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt = text.split('\\n')[-1]\n prompt = text.split('\\r')[-1]\n log.info(f\"find_prompt: prompt: '{prompt}'\")\n self.possible_prompts = self.get_possible_prompts(prompt)\n return prompt\n\n def get_possible_prompts(self, prompt):\n \"\"\"\n Method used to check if a prompt has one of the expected endings then\n create a list with all possible prompts for the device\n\n :param prompt: a prompt with a possible ending prompt (eg. 
\"switch#\")\n :type prompt: str\n\n :return: the list of prompts\n :rtype: list\n \"\"\"\n list_of_prompts = []\n list_of_possible_ending_prompts = self.list_of_possible_ending_prompts\n my_prompt = prompt\n for ending in list_of_possible_ending_prompts:\n if my_prompt.endswith(ending):\n my_prompt = my_prompt[:-len(ending)]\n break\n log.info(f\"get_possible_prompts: prompt found: '{my_prompt}'\")\n log.info(f\"get_possible_prompts: prompt found size: '{len(my_prompt)}'\"\n )\n for ending in list_of_possible_ending_prompts:\n list_of_prompts.append(my_prompt + ending)\n log.info(\n f'get_possible_prompts: list of possible prompts: {list_of_prompts}'\n )\n return list_of_prompts\n\n def check_if_prompt_is_found(self, text):\n \"\"\"\n Method used to check if a prompt is detected inside a string\n\n :param text: a string with prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt_found = False\n for prompt in self.possible_prompts:\n log.info(f\"check_if_prompt_is_found: prompt: '{prompt}'\")\n if prompt in text:\n prompt_found = True\n log.info(f\"check_if_prompt_is_found: prompt found: '{prompt}'\")\n break\n return prompt_found\n\n def remove_command_in_output(self, text, cmd):\n \"\"\"\n Method removing the command at the beginning of a string\n\n After sending commands an \"echo\" of the command sent\n is display in the output string. This method removes it.\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :param cmd: the command previously sent\n :type cmd: str\n\n :return: the output string without the command\n :rtype: str\n \"\"\"\n log.info(f\"remove_command_in_output: cmd = '{cmd}'\")\n log.info(\n f\"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'\")\n output = text.split(cmd + '\\n')[-1]\n log.info(f\"remove_command_in_output: output = '{output}'\")\n return output\n\n def remove_starting_carriage_return_in_output(self, text):\n \"\"\"\n Method removing the carriage return at the beginning of a string\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :return: the output string without the starting carriage return\n :rtype: str\n \"\"\"\n log.info('remove_starting_carriage_return_in_output')\n output = text.lstrip('\\r\\n\\r')\n log.info(\n f\"remove_starting_carriage_return_in_output: output = '{output}'\")\n return output\n\n def remove_ending_prompt_in_output(self, text):\n \"\"\"\n Method removing the prompt at the end of a string\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n log.info('remove_ending_prompt_in_output')\n for prompt in self.possible_prompts:\n log.info(f\"remove_ending_prompt_in_output: prompt: '{prompt}'\")\n if prompt in text:\n text = text[:-len(prompt)]\n text = text.rstrip('\\r\\n')\n break\n log.info(\n f\"remove_ending_prompt_in_output: text without prompt:\\n'{text}'\")\n return text\n\n def check_error_output(self, output):\n \"\"\"\n Check if an error is returned by the device (\"% Unrecognized command\", \"% Ambiguous command\", etc.)\n\n If an error is found, then an exception is raised\n \"\"\"\n log.info('check_error_output')\n if output:\n log.info('check_error_output: output has some data')\n for element in self._send_command_error_in_returned_output:\n log.info(f'check_error_output: element: {element}')\n log.info(f'check_error_output: output[0]: {output[0]}')\n if output.startswith(element):\n raise Exception(output)\n\n def 
remove_ansi_escape_sequence(self, text):\n \"\"\"\n Method removing ANSI escape sequence from a string\n Just CSI sequences are removed\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n output = ''\n esc_found = 0\n for i in text:\n if esc_found == 0:\n if i == '\\x1b':\n log.info('Esc!')\n esc_found = 1\n else:\n output += i\n elif esc_found == 1:\n if i == '[':\n log.info('CSI sequence')\n esc_found = 2\n else:\n output += '\\x1b' + i\n esc_found = 0\n elif i >= 'a' and i <= 'z' or i >= 'A' and i <= 'Z':\n log.info('End of escape sequence')\n esc_found = 0\n return output\n\n async def disable_paging(self):\n \"\"\"\n Async method disabling paging on a device\n\n Use the \"cmd_disable_paging\" attribute\n \"\"\"\n log.info('disable_paging')\n await self.send_command(self.cmd_disable_paging)\n\n async def connect(self):\n \"\"\"\n Async method used for connecting a device\n\n Currently supported: SSH and Telnet\n \"\"\"\n log.info('connect')\n try:\n if self._protocol == 'ssh':\n await self.connectSSH()\n elif self._protocol == 'telnet':\n await self.connectTelnet()\n else:\n raise Exception(\n f'connect: unsupported protocol: {self._protocol}')\n except Exception:\n log.info('connect: connection error')\n raise\n\n async def connectSSH(self):\n \"\"\"\n Async method used for connecting a device using SSH protocol\n \"\"\"\n log.info('connectSSH')\n generator = asyncssh.connect(self.ip, username=self.username,\n password=self.password, known_hosts=None, encryption_algs=[algs\n .decode('utf-8') for algs in asyncssh.encryption._enc_algs])\n try:\n self.conn = await asyncio.wait_for(generator, timeout=self.timeout)\n except asyncio.exceptions.TimeoutError as error:\n log.error(\n f\"connectSSH: connection failed: {self.ip} timeout: '{error}'\")\n raise asyncio.exceptions.TimeoutError(\n 'Connection failed: connection timed out.')\n except Exception as error:\n log.error(f\"connectSSH: connection failed: {self.ip} '{error}'\")\n raise\n log.info('connectSSH: connection success')\n self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type\n ='netscud')\n log.info('connectSSH: open_session success')\n data = ''\n prompt_not_found = True\n try:\n while prompt_not_found:\n log.info('connectSSH: beginning of the loop')\n data += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f\"connectSSH: data: '{str(data)}'\")\n log.info(\n f\"connectSSH: data: hex:'{data.encode('utf-8').hex()}'\")\n for prompt in self._connect_first_ending_prompt:\n if data.endswith(prompt):\n log.info(\n f\"connectSSH: first ending prompt found: '{prompt}'\"\n )\n prompt_not_found = False\n break\n log.info('connectSSH: end of loop')\n except Exception as error:\n log.error(\n f\"connectSSH: timeout while reading the prompt: {self.ip} '{error}'\"\n )\n raise\n log.info(f'connectSSH: end of prompt loop')\n data = self.remove_ansi_escape_sequence(data)\n self.prompt = self.find_prompt(str(data))\n log.info(f\"connectSSH: prompt found: '{self.prompt}'\")\n log.info(f\"connectSSH: prompt found size: '{len(self.prompt)}'\")\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def connectTelnet(self):\n \"\"\"\n Async method used for connecting a device using Telnet protocol\n \"\"\"\n log.info('connectTelnet')\n try:\n conn = asyncio.open_connection(self.ip, self.port)\n except Exception as error:\n log.error(\n f\"connectTelnet: preparation to the 
connection failed: '{error}'\"\n )\n raise\n log.info('connectTelnet: preparation to the connection success')\n try:\n self._reader, self._writer = await asyncio.wait_for(conn,\n timeout=self.timeout)\n except asyncio.TimeoutError:\n log.error('connectTelnet: connection: timeout')\n raise\n log.info('connectTelnet: connection success')\n prompt = self._telnet_connect_login\n prompt_password = self._telnet_connect_password\n use_login = True\n output = ''\n byte_data = b''\n while True:\n log.info(f'connectTelnet: read data for prompt')\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f'connectTelnet: byte_data: {byte_data}')\n output = str(byte_data)\n log.info(f'connectTelnet: output: {output}')\n if prompt in output:\n break\n elif prompt_password in output:\n use_login = False\n break\n log.info(f\"connectTelnet: login prompt: '{output}'\")\n if use_login:\n log.info('connectTelnet: sending login')\n try:\n await self.send_command(self.username, prompt_password)\n log.info('connectTelnet: login sent')\n except Exception:\n raise\n log.info('connectTelnet: sending password')\n try:\n output = await self.telnet_send_command_with_unexpected_pattern(\n self.password, self._connect_first_ending_prompt, self.\n _telnet_connect_authentication_fail_prompt)\n except Exception:\n raise\n log.info('connectTelnet: password sent')\n self.prompt = self.find_prompt(str(output))\n log.info(f\"connectTelnet: prompt found: '{self.prompt}'\")\n if self.enable_mode:\n log.info('connectTelnet: enable mode to be activated')\n try:\n await self.send_command(self.cmd_enable, prompt_password)\n log.info('connectTelnet: enable command sent')\n log.info('connectTelnet: sending enable password')\n await self.telnet_send_command_with_unexpected_pattern(self\n .enable_password, self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt)\n log.info('connectTelnet: enable password sent')\n except Exception:\n log.info('connectTelnet: enable password failure')\n raise\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def disconnect(self):\n \"\"\"\n Async method used to disconnect a device\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnect')\n if self._protocol == 'ssh':\n await self.disconnectSSH()\n elif self._protocol == 'telnet':\n await self.disconnectTelnet()\n else:\n raise Exception(f'Unsupported protocol: {self._protocol}')\n\n async def disconnectSSH(self):\n \"\"\"\n Async method used to disconnect a device in SSH\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectSSH')\n if self.conn:\n self.conn.close()\n self.conn = None\n\n async def disconnectTelnet(self):\n \"\"\"\n Async method used to disconnect a device in Telnet\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectTelnet')\n if self._writer:\n self._writer.close()\n self._writer = None\n\n async def send_command(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_command')\n if timeout is None:\n timeout = self.timeout\n if self._protocol == 'ssh':\n output = await self.send_commandSSH(cmd, pattern=pattern,\n timeout=timeout)\n elif self._protocol == 'telnet':\n output = await self.send_commandTelnet(cmd, pattern=pattern,\n timeout=timeout)\n else:\n raise Exception(\n f'send_command: unsupported protocol: {self._protocol}')\n return output\n\n async def send_commandSSH(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandSSH')\n if timeout is None:\n timeout = self.timeout\n log.info(f\"send_commandSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd + self._carriage_return_for_send_command)\n log.info('send_commandSSH: command sent')\n output = ''\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = self.remove_ansi_escape_sequence(output)\n output = output.replace('\\r', '')\n log.info(f\"send_commandSSH: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_commandSSH: raw output: '{output}'\nsend_commandSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandSSH: cleaned output: '{output}'\nsend_commandSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def send_commandTelnet(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandTelnet')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + '\\n'\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_commandTelnet: byte_data: '{byte_data}'\")\n output = str(byte_data)\n log.info(f\"send_commandTelnet: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_commandTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_commandTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_commandTelnet: raw output: '{output}'\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandTelnet: cleaned output: '{output}'\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def telnet_send_command_with_unexpected_pattern(self, cmd,\n pattern, error_pattern=None, timeout=None):\n \"\"\"\n Async method used to send a command over a Telnet connection to a device with possible unexpected patterns\n\n send_command can wait until timeout if the login and password are wrong. This method\n speeds up the returned error message when an authentication failure is identified.\n This method is limited to authentication when a password is required\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a list of patterns located at the very end of a returned string. Can be used\n to define a custom or unexpected prompt at the end of a string\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :param error_pattern: optional, a list of failed prompts found when the login and password are not correct\n :type error_pattern: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('telnet_send_command_with_unexpected_pattern')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + self._carriage_return_for_send_command\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n pattern_not_found = True\n try:\n while pattern_not_found:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'\"\n )\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'\"\n )\n output = str(byte_data)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: output: '{output}'\"\n )\n if pattern:\n for prompt in pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'\"\n )\n if prompt in output:\n pattern_not_found = False\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'\"\n )\n break\n if error_pattern and pattern_not_found:\n for bad_prompt in error_pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'\"\n )\n if bad_prompt in output:\n log.error(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n raise Exception(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n except asyncio.TimeoutError:\n await self.disconnect()\n log.error(\n 'telnet_send_command_with_unexpected_pattern: reading prompt: timeout'\n )\n raise\n except Exception as error:\n await self.disconnect()\n log.error(\n f'telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}'\n )\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n return output\n\n async def send_config_set(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_set')\n if timeout is None:\n timeout = self.timeout\n log.info('send_command')\n if self._protocol == 'ssh':\n output = await self.send_config_setSSH(cmds, timeout)\n elif self._protocol == 'telnet':\n output = await self.send_config_setTelnet(cmds, timeout)\n else:\n raise Exception(\n f'send_config_set: unsupported protocol: {self._protocol}')\n return output\n\n async def send_config_setSSH(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setSSH')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list'\n )\n return returned_output\n log.info('send_config_set: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: configuration mode entered')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command sent')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = 
cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command to leave configuration mode sent'\n )\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def send_config_setTelnet(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands sent can be either a string or a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setTelnet')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setTelnet: parameter cmds used in send_config_set is neither a string nor a list'\n )\n return returned_output\n log.info('send_config_setTelnet: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: configuration mode entered')\n output = ''\n byte_data = b''\n try:\n 
while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info(\n 'send_config_setTelnet: command to leave configuration mode sent')\n output = ''\n byte_data = b''\n loop = 3\n try:\n while loop:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n await asyncio.sleep(0.5)\n if self.check_if_prompt_is_found(output):\n break\n loop -= 1\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def get_version(self):\n \"\"\"\n Async method used to get the version of the software of the device\n\n :return: Version of the software of the device\n :rtype: str\n \"\"\"\n log.info('get_version')\n version = ''\n output = await self.send_command(self.cmd_get_version)\n version = output.split('Version ')[1].split(',')[0]\n log.info(f'get_version: version: {version}')\n return version\n\n async def get_hostname(self):\n \"\"\"\n Async method used to get the name of the device\n\n :return: Name of the device\n :rtype: str\n \"\"\"\n log.info('get_hostname')\n output = await self.send_command(self.cmd_get_hostname)\n log.info(f\"get_hostname: output: '{output}'\")\n output = output.split()[0]\n log.info(f\"get_hostname: hostname found: '{output}'\")\n return output\n\n async def get_model(self):\n \"\"\"\n Async method used to get the model of the device\n\n :return: Model of the device\n :rtype: str\n \"\"\"\n log.info('get_model')\n output = await self.send_command(self.cmd_get_model)\n log.info(f\"get_model: output: '{output}'\")\n 
output = output.split('\"')[3]\n log.info(f\"get_model: model found: '{output}'\")\n return output\n\n async def get_serial_number(self):\n \"\"\"\n Get serial number of the switch or the serial number of the first switch of a stack\n\n :return: Serial number of the device\n :rtype: str\n \"\"\"\n log.info('get_serial_number')\n output = await self.send_command(self.cmd_get_serial_number)\n log.info(f\"get_serial_number: output: '{output}'\")\n output = output.splitlines()[0].split()[-1]\n log.info(f\"get_serial_number: serial number found: '{output}'\")\n return output\n\n async def get_config(self, timeout=None):\n \"\"\"\n Async method used to get the configuration of the device\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: Configuration of the device\n :rtype: str\n \"\"\"\n log.info('get_config')\n if timeout is None:\n timeout = self.timeout\n output = await self.send_command(self.cmd_get_config, timeout=timeout)\n return output\n\n async def save_config(self):\n \"\"\"\n Async method used to save the current configuration on the device\n\n :return: Commands of the configuration saving process\n :rtype: str\n \"\"\"\n log.info('save_config')\n output = await self.send_command(self.cmd_save_config)\n return output\n",
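Every Telnet read loop in the class (send_commandTelnet, send_config_setTelnet, telnet_send_command_with_unexpected_pattern) follows the same pattern: accumulate raw bytes from the asyncio StreamReader, bound each read with asyncio.wait_for, and stop once a known prompt substring appears in the decoded buffer. A standalone sketch of that pattern follows; the function name and the buffer size constant are chosen here for illustration.

import asyncio

MAX_BUFFER_DATA = 65535  # assumed read chunk size, mirroring the constant used above

async def read_until_prompt(reader, possible_prompts, timeout=10):
    # Accumulate bytes until one of the expected prompts appears.
    # Wrapping each read() in asyncio.wait_for means a device that never
    # returns a prompt raises asyncio.TimeoutError instead of hanging.
    byte_data = b''
    while True:
        byte_data += await asyncio.wait_for(
            reader.read(MAX_BUFFER_DATA), timeout=timeout)
        output = byte_data.decode('utf-8', 'ignore')
        if any(prompt in output for prompt in possible_prompts):
            return output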
"step-3": "<mask token>\nlogging.basicConfig(level=logging.DEBUG)\nasyncssh.set_debug_level(2)\n<mask token>\n\n\nclass NetworkDevice:\n \"\"\"\n Base class for network object\n\n\n :param ip: IP address of a device\n :type ip: str\n\n :param username: Username used to connect to a device\n :type username: str\n\n :param password: Password used to connect to a device\n :type password: str\n\n :param device_type: Type of device used\n :type device_type: str\n\n :param port: TCP port used to connect a device. Default value is \"22\" for SSH\n :type port: int, optional\n\n :param timeout: TCP port used to connect a device. Default value is 10 seconds\n :type timeout: int, optional\n\n :param _protocol: Protocol used to connect a device. \"ssh\" or \"telnet\" are possible options. Default value is \"ssh\"\n :type _protocol: str, optional\n\n :param enable_mode: Enable mode for devices requiring it. Default value is \"False\"\n :type enable_mode: bool, optional\n\n :param enable_password: Enable password used for enable mode.\n :type enable_password: str, optional\n\n :param conn: Variable used for the management of the SSH connection\n :type conn: SSHClientConnection object\n\n :param _writer: Variable used for the management of the Telnet connection and writing channel\n :type _writer: StreamWriter object\n\n :param _reader: Variable used for the management of the Telnet reading channel\n :type _reader: StreamReader object\n\n :param possible_prompts: Used by the connect method to list all possible prompts of the device\n :type possible_prompts: list\n\n :param _connect_first_ending_prompt: Default possible ending prompts. Used only the time after login and password to discover the prompt\n :type _connect_first_ending_prompt: list\n\n :param list_of_possible_ending_prompts: Different strings at the end of a prompt the device can get. Used for detecting the prompt returned in sent commands\n :type list_of_possible_ending_prompts: list\n\n :param _telnet_connect_login: Login prompt for Telnet. Used to detect when a login is expected or when login and password access is failed\n :type _telnet_connect_login: str\n\n :param _telnet_connect_password: Password prompt for Telnet. Used to detect when a login is expected or when login and password access is failed\n :type _telnet_connect_password: list\n\n :param _telnet_connect_authentication_fail_prompt: Known failing messages or prompts when an authentication has failed. Used to get an answer faster than timeout events\n :type _telnet_connect_authentication_fail_prompt: list\n\n :param cmd_enable: Enable command for entering into enable mode\n :type cmd_enable: str\n\n :param cmd_disable_paging: Command used to disable paging on a device. 
That command is run at connection time\n :type cmd_disable_paging: str\n\n :param cmd_enter_config_mode: Command used to enter into a configuration mode on a device when this device support that feature.\n :type cmd_enter_config_mode: str\n\n :param cmd_exit_config_mode: Command used to leave a configuration mode on a device when this device support that feature.\n :type cmd_exit_config_mode: str\n\n :param cmd_get_version: API command used to get the software version of a device\n :type cmd_get_version: str\n\n :param cmd_get_hostname: API command used to get the hostname of a device\n :type cmd_get_hostname: str\n\n :param cmd_get_model: API command used to get the model of a device\n :type cmd_get_model: str\n\n :param cmd_get_serial_number: API command used to get the serial number of a device\n :type cmd_get_serial_number: str\n\n :param cmd_get_config: API command used to get the running configuration of a device\n :type cmd_get_config: str\n\n :param cmd_save_config: API command used to save the running configuration on the device\n :type cmd_save_config: str\n \"\"\"\n\n def __init__(self, **kwargs):\n log.info('__init__')\n self.ip = ''\n self.username = ''\n self.password = ''\n self.device_type = ''\n self.port = 22\n self.timeout = 10\n self._protocol = 'ssh'\n self.enable_mode = False\n self.enable_password = ''\n self.conn = None\n self._writer = None\n self._reader = None\n self.possible_prompts = []\n self._connect_first_ending_prompt = ['#', '>']\n self.list_of_possible_ending_prompts = ['(config-line)#',\n '(config-if)#', '(config)#', '>', '#']\n self._carriage_return_for_send_command = '\\n'\n self._send_command_error_in_returned_output = []\n self._telnet_connect_login = 'Username:'\n self._telnet_connect_password = 'Password:'\n self._telnet_connect_authentication_fail_prompt = [':', '%']\n self.cmd_enable = 'enable'\n self.cmd_disable_paging = 'terminal length 0'\n self.cmd_enter_config_mode = 'configure terminal'\n self.cmd_exit_config_mode = 'exit'\n self.cmd_get_version = 'show version'\n self.cmd_get_hostname = 'show version | include uptime'\n self.cmd_get_model = 'show inventory'\n self.cmd_get_serial_number = 'show inventory | i SN'\n self.cmd_get_config = 'show running-config'\n self.cmd_save_config = 'write memory'\n self.cmd_get_interfaces = [\n 'interface ethernet print terse without-paging',\n 'foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}'\n , 'interface bridge port print terse without-paging']\n self.cmd_set_interface = ['interface ethernet enable <INTERFACE>',\n 'interface ethernet disable <INTERFACE>',\n 'interface ethernet comment <INTERFACE> \"<COMMENT>\"',\n 'interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>',\n 'interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]'\n ]\n self.cmd_get_mac_address_table = (\n 'interface bridge host print without-paging')\n self.cmd_get_arp = 'ip arp print terse without-paging'\n self.cmd_get_lldp_neighbors = 'ip neighbor print terse without-paging'\n self.cmd_get_vlans = 'interface bridge vlan print terse without-paging'\n self.cmd_add_vlan = (\n 'interface bridge vlan add vlan-ids=<VLAN> comment=\"<VLAN_NAME>\" bridge=<BRIDGE>'\n )\n self.cmd_remove_vlan = (\n 'interface bridge vlan remove [find vlan-ids=<VLAN>]')\n self.cmd_add_interface_to_vlan = ['interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find 
vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_remove_interface_from_vlan = [\n 'interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_get_routing_table = 'ip route print without-paging terse'\n self.cmd_get_interfaces_ip = 'ip address print terse without-paging'\n self.cmd_add_static_route = (\n 'ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>'\n )\n self.cmd_remove_static_route = (\n 'ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]')\n log.debug('__init__: kwargs: ' + str(kwargs))\n if 'ip' in kwargs:\n self.ip = kwargs['ip']\n log.info('__init__: ip found: ' + str(self.ip))\n if 'username' in kwargs:\n self.username = kwargs['username']\n log.info('__init__: username found: ' + str(self.username))\n if 'password' in kwargs:\n self.password = kwargs['password']\n log.debug('__init__: password found: ' + str(self.password))\n if 'device_type' in kwargs:\n self.device_type = kwargs['device_type']\n log.info('__init__: device_type found: ' + str(self.device_type))\n if 'timeout' in kwargs:\n self.timeout = kwargs['timeout']\n log.info('__init__: timeout found: ' + str(self.timeout))\n if 'protocol' in kwargs:\n self._protocol = kwargs['protocol'].lower()\n log.info('__init__: protocol found: ' + str(self._protocol))\n if self._protocol.lower() == 'telnet':\n self.port = 23\n if 'port' in kwargs:\n self.port = kwargs['port']\n log.info('__init__: port found: ' + str(self.port))\n if 'enable_mode' in kwargs:\n self.enable_mode = kwargs['enable_mode']\n log.info('__init__: enable_mode found: ' + str(self.enable_mode))\n if 'enable_password' in kwargs:\n self.enable_password = kwargs['enable_password']\n log.info('__init__: enable_password found: ' + str(self.\n enable_password))\n\n async def __aenter__(self):\n \"\"\"\n Context manager opening connection\n \"\"\"\n try:\n await self.connect()\n except Exception:\n await self.disconnect()\n raise\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Context manager closing connection\n \"\"\"\n await self.disconnect()\n\n def find_prompt(self, text):\n \"\"\"\n Method used to find a prompt inside an output string\n\n This method is used during the first communication with the device.\n First it find the prompt then caculate the different forms the prompt\n can take. This will be useful later on while finding prompt in other\n output stream (read).\n\n :param text: data with a prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt = text.split('\\n')[-1]\n prompt = text.split('\\r')[-1]\n log.info(f\"find_prompt: prompt: '{prompt}'\")\n self.possible_prompts = self.get_possible_prompts(prompt)\n return prompt\n\n def get_possible_prompts(self, prompt):\n \"\"\"\n Method used to check if a prompt has one of the expected endings then\n create a list with all possible prompts for the device\n\n :param prompt: a prompt with a possible ending prompt (eg. 
\"switch#\")\n :type prompt: str\n\n :return: the list of prompts\n :rtype: list\n \"\"\"\n list_of_prompts = []\n list_of_possible_ending_prompts = self.list_of_possible_ending_prompts\n my_prompt = prompt\n for ending in list_of_possible_ending_prompts:\n if my_prompt.endswith(ending):\n my_prompt = my_prompt[:-len(ending)]\n break\n log.info(f\"get_possible_prompts: prompt found: '{my_prompt}'\")\n log.info(f\"get_possible_prompts: prompt found size: '{len(my_prompt)}'\"\n )\n for ending in list_of_possible_ending_prompts:\n list_of_prompts.append(my_prompt + ending)\n log.info(\n f'get_possible_prompts: list of possible prompts: {list_of_prompts}'\n )\n return list_of_prompts\n\n def check_if_prompt_is_found(self, text):\n \"\"\"\n Method used to check if a prompt is detected inside a string\n\n :param text: a string with prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt_found = False\n for prompt in self.possible_prompts:\n log.info(f\"check_if_prompt_is_found: prompt: '{prompt}'\")\n if prompt in text:\n prompt_found = True\n log.info(f\"check_if_prompt_is_found: prompt found: '{prompt}'\")\n break\n return prompt_found\n\n def remove_command_in_output(self, text, cmd):\n \"\"\"\n Method removing the command at the beginning of a string\n\n After sending commands an \"echo\" of the command sent\n is display in the output string. This method removes it.\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :param cmd: the command previously sent\n :type cmd: str\n\n :return: the output string without the command\n :rtype: str\n \"\"\"\n log.info(f\"remove_command_in_output: cmd = '{cmd}'\")\n log.info(\n f\"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'\")\n output = text.split(cmd + '\\n')[-1]\n log.info(f\"remove_command_in_output: output = '{output}'\")\n return output\n\n def remove_starting_carriage_return_in_output(self, text):\n \"\"\"\n Method removing the carriage return at the beginning of a string\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :return: the output string without the starting carriage return\n :rtype: str\n \"\"\"\n log.info('remove_starting_carriage_return_in_output')\n output = text.lstrip('\\r\\n\\r')\n log.info(\n f\"remove_starting_carriage_return_in_output: output = '{output}'\")\n return output\n\n def remove_ending_prompt_in_output(self, text):\n \"\"\"\n Method removing the prompt at the end of a string\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n log.info('remove_ending_prompt_in_output')\n for prompt in self.possible_prompts:\n log.info(f\"remove_ending_prompt_in_output: prompt: '{prompt}'\")\n if prompt in text:\n text = text[:-len(prompt)]\n text = text.rstrip('\\r\\n')\n break\n log.info(\n f\"remove_ending_prompt_in_output: text without prompt:\\n'{text}'\")\n return text\n\n def check_error_output(self, output):\n \"\"\"\n Check if an error is returned by the device (\"% Unrecognized command\", \"% Ambiguous command\", etc.)\n\n If an error is found, then an exception is raised\n \"\"\"\n log.info('check_error_output')\n if output:\n log.info('check_error_output: output has some data')\n for element in self._send_command_error_in_returned_output:\n log.info(f'check_error_output: element: {element}')\n log.info(f'check_error_output: output[0]: {output[0]}')\n if output.startswith(element):\n raise Exception(output)\n\n def 
remove_ansi_escape_sequence(self, text):\n \"\"\"\n Method removing ANSI escape sequence from a string\n Just CSI sequences are removed\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n output = ''\n esc_found = 0\n for i in text:\n if esc_found == 0:\n if i == '\\x1b':\n log.info('Esc!')\n esc_found = 1\n else:\n output += i\n elif esc_found == 1:\n if i == '[':\n log.info('CSI sequence')\n esc_found = 2\n else:\n output += '\\x1b' + i\n esc_found = 0\n elif i >= 'a' and i <= 'z' or i >= 'A' and i <= 'Z':\n log.info('End of escape sequence')\n esc_found = 0\n return output\n\n async def disable_paging(self):\n \"\"\"\n Async method disabling paging on a device\n\n Use the \"cmd_disable_paging\" attribute\n \"\"\"\n log.info('disable_paging')\n await self.send_command(self.cmd_disable_paging)\n\n async def connect(self):\n \"\"\"\n Async method used for connecting a device\n\n Currently supported: SSH and Telnet\n \"\"\"\n log.info('connect')\n try:\n if self._protocol == 'ssh':\n await self.connectSSH()\n elif self._protocol == 'telnet':\n await self.connectTelnet()\n else:\n raise Exception(\n f'connect: unsupported protocol: {self._protocol}')\n except Exception:\n log.info('connect: connection error')\n raise\n\n async def connectSSH(self):\n \"\"\"\n Async method used for connecting a device using SSH protocol\n \"\"\"\n log.info('connectSSH')\n generator = asyncssh.connect(self.ip, username=self.username,\n password=self.password, known_hosts=None, encryption_algs=[algs\n .decode('utf-8') for algs in asyncssh.encryption._enc_algs])\n try:\n self.conn = await asyncio.wait_for(generator, timeout=self.timeout)\n except asyncio.exceptions.TimeoutError as error:\n log.error(\n f\"connectSSH: connection failed: {self.ip} timeout: '{error}'\")\n raise asyncio.exceptions.TimeoutError(\n 'Connection failed: connection timed out.')\n except Exception as error:\n log.error(f\"connectSSH: connection failed: {self.ip} '{error}'\")\n raise\n log.info('connectSSH: connection success')\n self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type\n ='netscud')\n log.info('connectSSH: open_session success')\n data = ''\n prompt_not_found = True\n try:\n while prompt_not_found:\n log.info('connectSSH: beginning of the loop')\n data += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f\"connectSSH: data: '{str(data)}'\")\n log.info(\n f\"connectSSH: data: hex:'{data.encode('utf-8').hex()}'\")\n for prompt in self._connect_first_ending_prompt:\n if data.endswith(prompt):\n log.info(\n f\"connectSSH: first ending prompt found: '{prompt}'\"\n )\n prompt_not_found = False\n break\n log.info('connectSSH: end of loop')\n except Exception as error:\n log.error(\n f\"connectSSH: timeout while reading the prompt: {self.ip} '{error}'\"\n )\n raise\n log.info(f'connectSSH: end of prompt loop')\n data = self.remove_ansi_escape_sequence(data)\n self.prompt = self.find_prompt(str(data))\n log.info(f\"connectSSH: prompt found: '{self.prompt}'\")\n log.info(f\"connectSSH: prompt found size: '{len(self.prompt)}'\")\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def connectTelnet(self):\n \"\"\"\n Async method used for connecting a device using Telnet protocol\n \"\"\"\n log.info('connectTelnet')\n try:\n conn = asyncio.open_connection(self.ip, self.port)\n except Exception as error:\n log.error(\n f\"connectTelnet: preparation to the 
connection failed: '{error}'\"\n )\n raise\n log.info('connectTelnet: preparation to the connection success')\n try:\n self._reader, self._writer = await asyncio.wait_for(conn,\n timeout=self.timeout)\n except asyncio.TimeoutError:\n log.error('connectTelnet: connection: timeout')\n raise\n log.info('connectTelnet: connection success')\n prompt = self._telnet_connect_login\n prompt_password = self._telnet_connect_password\n use_login = True\n output = ''\n byte_data = b''\n while True:\n log.info(f'connectTelnet: read data for prompt')\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f'connectTelnet: byte_data: {byte_data}')\n output = str(byte_data)\n log.info(f'connectTelnet: output: {output}')\n if prompt in output:\n break\n elif prompt_password in output:\n use_login = False\n break\n log.info(f\"connectTelnet: login prompt: '{output}'\")\n if use_login:\n log.info('connectTelnet: sending login')\n try:\n await self.send_command(self.username, prompt_password)\n log.info('connectTelnet: login sent')\n except Exception:\n raise\n log.info('connectTelnet: sending password')\n try:\n output = await self.telnet_send_command_with_unexpected_pattern(\n self.password, self._connect_first_ending_prompt, self.\n _telnet_connect_authentication_fail_prompt)\n except Exception:\n raise\n log.info('connectTelnet: password sent')\n self.prompt = self.find_prompt(str(output))\n log.info(f\"connectTelnet: prompt found: '{self.prompt}'\")\n if self.enable_mode:\n log.info('connectTelnet: enable mode to be activated')\n try:\n await self.send_command(self.cmd_enable, prompt_password)\n log.info('connectTelnet: enable command sent')\n log.info('connectTelnet: sending enable password')\n await self.telnet_send_command_with_unexpected_pattern(self\n .enable_password, self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt)\n log.info('connectTelnet: enable password sent')\n except Exception:\n log.info('connectTelnet: enable password failure')\n raise\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def disconnect(self):\n \"\"\"\n Async method used to disconnect a device\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnect')\n if self._protocol == 'ssh':\n await self.disconnectSSH()\n elif self._protocol == 'telnet':\n await self.disconnectTelnet()\n else:\n raise Exception(f'Unsupported protocol: {self._protocol}')\n\n async def disconnectSSH(self):\n \"\"\"\n Async method used to disconnect a device in SSH\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectSSH')\n if self.conn:\n self.conn.close()\n self.conn = None\n\n async def disconnectTelnet(self):\n \"\"\"\n Async method used to disconnect a device in Telnet\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectTelnet')\n if self._writer:\n self._writer.close()\n self._writer = None\n\n async def send_command(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_command')\n if timeout is None:\n timeout = self.timeout\n if self._protocol == 'ssh':\n output = await self.send_commandSSH(cmd, pattern=pattern,\n timeout=timeout)\n elif self._protocol == 'telnet':\n output = await self.send_commandTelnet(cmd, pattern=pattern,\n timeout=timeout)\n else:\n raise Exception(\n f'send_command: unsupported protocol: {self._protocol}')\n return output\n\n async def send_commandSSH(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandSSH')\n if timeout is None:\n timeout = self.timeout\n log.info(f\"send_commandSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd + self._carriage_return_for_send_command)\n log.info('send_commandSSH: command sent')\n output = ''\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = self.remove_ansi_escape_sequence(output)\n output = output.replace('\\r', '')\n log.info(f\"send_commandSSH: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_commandSSH: raw output: '{output}'\nsend_commandSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandSSH: cleaned output: '{output}'\nsend_commandSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def send_commandTelnet(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandTelnet')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + '\n'\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_commandTelnet: byte_data: '{byte_data}'\")\n output = str(byte_data)\n log.info(f\"send_commandTelnet: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_commandTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_commandTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_commandTelnet: raw output: '{output}'\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandTelnet: cleaned output: '{output}'\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def telnet_send_command_with_unexpected_pattern(self, cmd,\n pattern, error_pattern=None, timeout=None):\n \"\"\"\n Async method used to send command for Telnet connection to a device with possible unexpected patterns\n\n send_command can wait until timeout if login and password are wrong. This method\n speeds up the returned error message when an authentication failure is identified.\n This method is limited to authentication when a password is required\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a list of patterns located at the very end of a returned string. Can be used\n to define a custom or unexpected prompt at the end of a string\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :param error_pattern: optional, a list of failed prompts found when the login and password are not correct\n :type error_pattern: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('telnet_send_command_with_unexpected_pattern')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + self._carriage_return_for_send_command\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n pattern_not_found = True\n try:\n while pattern_not_found:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'\"\n )\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'\"\n )\n output = str(byte_data)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: output: '{output}'\"\n )\n if pattern:\n for prompt in pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'\"\n )\n if prompt in output:\n pattern_not_found = False\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'\"\n )\n break\n if error_pattern and pattern_not_found:\n for bad_prompt in error_pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'\"\n )\n if bad_prompt in output:\n log.error(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n raise Exception(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n except asyncio.TimeoutError:\n await self.disconnect()\n log.error(\n 'telnet_send_command_with_unexpected_pattern: reading prompt: timeout'\n )\n raise\n except Exception as error:\n await self.disconnect()\n log.error(\n f'telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}'\n )\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n return output\n\n async def send_config_set(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_set')\n if timeout is None:\n timeout = self.timeout\n log.info('send_command')\n if self._protocol == 'ssh':\n output = await self.send_config_setSSH(cmds, timeout)\n elif self._protocol == 'telnet':\n output = await self.send_config_setTelnet(cmds, timeout)\n else:\n raise Exception(\n f'send_config_set: unsupported protocol: {self._protocol}')\n return output\n\n async def send_config_setSSH(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setSSH')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list'\n )\n return returned_output\n log.info('send_config_set: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: configuration mode entered')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command sent')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = 
cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command to leave configuration mode sent'\n )\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def send_config_setTelnet(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setTelnet')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setTelnet: parameter cmds used in send_config_set is neither a string or a list'\n )\n return returned_output\n log.info('send_config_setTelnet: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: configuration mode entered')\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: command sent')\n output = ''\n byte_data = b''\n try:\n 
while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info(\n 'send_config_setTelnet: command to leave configuration mode sent')\n output = ''\n byte_data = b''\n loop = 3\n try:\n while loop:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n await asyncio.sleep(0.5)\n if self.check_if_prompt_is_found(output):\n break\n loop -= 1\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def get_version(self):\n \"\"\"\n Async method used to get the version of the software of the device\n\n :return: Version of the software of the device\n :rtype: str\n \"\"\"\n log.info('get_version')\n version = ''\n output = await self.send_command(self.cmd_get_version)\n version = output.split('Version ')[1].split(',')[0]\n log.info(f'get_version: version: {version}')\n return version\n\n async def get_hostname(self):\n \"\"\"\n Async method used to get the name of the device\n\n :return: Name of the device\n :rtype: str\n \"\"\"\n log.info('get_hostname')\n output = await self.send_command(self.cmd_get_hostname)\n log.info(f\"get_hostname: output: '{output}'\")\n output = output.split()[0]\n log.info(f\"get_hostname: hostname found: '{output}'\")\n return output\n\n async def get_model(self):\n \"\"\"\n Async method used to get the model of the device\n\n :return: Model of the device\n :rtype: str\n \"\"\"\n log.info('get_model')\n output = await self.send_command(self.cmd_get_model)\n log.info(f\"get_model: output: '{output}'\")\n 
output = output.split('\"')[3]\n log.info(f\"get_model: model found: '{output}'\")\n return output\n\n async def get_serial_number(self):\n \"\"\"\n Get serial number of the switch or the serial number of the first switch of a stack\n\n :return: Serial number of the device\n :rtype: str\n \"\"\"\n log.info('get_serial_number')\n output = await self.send_command(self.cmd_get_serial_number)\n log.info(f\"get_serial_number: output: '{output}'\")\n output = output.splitlines()[0].split()[-1]\n log.info(f\"get_serial_number: serial number found: '{output}'\")\n return output\n\n async def get_config(self, timeout=None):\n \"\"\"\n Async method used to get the configuration of the device\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: Configuration of the device\n :rtype: str\n \"\"\"\n log.info('get_config')\n if timeout is None:\n timeout = self.timeout\n output = await self.send_command(self.cmd_get_config, timeout=timeout)\n return output\n\n async def save_config(self):\n \"\"\"\n Async method used to save the current configuration on the device\n\n :return: Commands of the configuration saving process\n :rtype: str\n \"\"\"\n log.info('save_config')\n output = await self.send_command(self.cmd_save_config)\n return output\n",
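For reference, a minimal usage sketch of the NetworkDevice class serialized above (this snippet is illustrative and not part of the dataset row; the IP address, credentials, and the CLI command sent are hypothetical placeholders):

import asyncio

async def main():
    # __aenter__ calls connect() (SSH by default, TCP port 22) and
    # __aexit__ calls disconnect(), so the session is always closed.
    async with NetworkDevice(ip='192.0.2.1', username='admin',
                             password='secret') as device:
        # send_command() returns the output with the command echo and the
        # ending prompt already stripped by the remove_* helper methods.
        print(await device.get_version())
        print(await device.send_command('show ip interface brief'))

asyncio.run(main())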
"step-4": "import asyncio, asyncssh, logging\nlog = logging.getLogger(__package__)\nlogging.basicConfig(level=logging.DEBUG)\nasyncssh.set_debug_level(2)\nMAX_BUFFER_DATA = 65535\nipv4_netmask_list = {'0.0.0.0': '0', '128.0.0.0': '1', '192.0.0.0': '2',\n '224.0.0.0': '3', '240.0.0.0': '4', '248.0.0.0': '5', '252.0.0.0': '6',\n '254.0.0.0': '7', '255.0.0.0': '8', '255.128.0.0': '9', '255.192.0.0':\n '10', '255.224.0.0': '11', '255.240.0.0': '12', '255.248.0.0': '13',\n '255.252.0.0': '14', '255.254.0.0': '15', '255.255.0.0': '16',\n '255.255.128.0': '17', '255.255.192.0': '18', '255.255.224.0': '19',\n '255.255.240.0': '20', '255.255.248.0': '21', '255.255.252.0': '22',\n '255.255.254.0': '23', '255.255.255.0': '24', '255.255.255.128': '25',\n '255.255.255.192': '26', '255.255.255.224': '27', '255.255.255.240':\n '28', '255.255.255.248': '29', '255.255.255.252': '30',\n '255.255.255.254': '31', '255.255.255.255': '32'}\n\n\nclass NetworkDevice:\n \"\"\"\n Base class for network object\n\n\n :param ip: IP address of a device\n :type ip: str\n\n :param username: Username used to connect to a device\n :type username: str\n\n :param password: Password used to connect to a device\n :type password: str\n\n :param device_type: Type of device used\n :type device_type: str\n\n :param port: TCP port used to connect a device. Default value is \"22\" for SSH\n :type port: int, optional\n\n :param timeout: TCP port used to connect a device. Default value is 10 seconds\n :type timeout: int, optional\n\n :param _protocol: Protocol used to connect a device. \"ssh\" or \"telnet\" are possible options. Default value is \"ssh\"\n :type _protocol: str, optional\n\n :param enable_mode: Enable mode for devices requiring it. Default value is \"False\"\n :type enable_mode: bool, optional\n\n :param enable_password: Enable password used for enable mode.\n :type enable_password: str, optional\n\n :param conn: Variable used for the management of the SSH connection\n :type conn: SSHClientConnection object\n\n :param _writer: Variable used for the management of the Telnet connection and writing channel\n :type _writer: StreamWriter object\n\n :param _reader: Variable used for the management of the Telnet reading channel\n :type _reader: StreamReader object\n\n :param possible_prompts: Used by the connect method to list all possible prompts of the device\n :type possible_prompts: list\n\n :param _connect_first_ending_prompt: Default possible ending prompts. Used only the time after login and password to discover the prompt\n :type _connect_first_ending_prompt: list\n\n :param list_of_possible_ending_prompts: Different strings at the end of a prompt the device can get. Used for detecting the prompt returned in sent commands\n :type list_of_possible_ending_prompts: list\n\n :param _telnet_connect_login: Login prompt for Telnet. Used to detect when a login is expected or when login and password access is failed\n :type _telnet_connect_login: str\n\n :param _telnet_connect_password: Password prompt for Telnet. Used to detect when a login is expected or when login and password access is failed\n :type _telnet_connect_password: list\n\n :param _telnet_connect_authentication_fail_prompt: Known failing messages or prompts when an authentication has failed. Used to get an answer faster than timeout events\n :type _telnet_connect_authentication_fail_prompt: list\n\n :param cmd_enable: Enable command for entering into enable mode\n :type cmd_enable: str\n\n :param cmd_disable_paging: Command used to disable paging on a device. 
That command is run at connection time\n :type cmd_disable_paging: str\n\n :param cmd_enter_config_mode: Command used to enter into a configuration mode on a device when this device support that feature.\n :type cmd_enter_config_mode: str\n\n :param cmd_exit_config_mode: Command used to leave a configuration mode on a device when this device support that feature.\n :type cmd_exit_config_mode: str\n\n :param cmd_get_version: API command used to get the software version of a device\n :type cmd_get_version: str\n\n :param cmd_get_hostname: API command used to get the hostname of a device\n :type cmd_get_hostname: str\n\n :param cmd_get_model: API command used to get the model of a device\n :type cmd_get_model: str\n\n :param cmd_get_serial_number: API command used to get the serial number of a device\n :type cmd_get_serial_number: str\n\n :param cmd_get_config: API command used to get the running configuration of a device\n :type cmd_get_config: str\n\n :param cmd_save_config: API command used to save the running configuration on the device\n :type cmd_save_config: str\n \"\"\"\n\n def __init__(self, **kwargs):\n log.info('__init__')\n self.ip = ''\n self.username = ''\n self.password = ''\n self.device_type = ''\n self.port = 22\n self.timeout = 10\n self._protocol = 'ssh'\n self.enable_mode = False\n self.enable_password = ''\n self.conn = None\n self._writer = None\n self._reader = None\n self.possible_prompts = []\n self._connect_first_ending_prompt = ['#', '>']\n self.list_of_possible_ending_prompts = ['(config-line)#',\n '(config-if)#', '(config)#', '>', '#']\n self._carriage_return_for_send_command = '\\n'\n self._send_command_error_in_returned_output = []\n self._telnet_connect_login = 'Username:'\n self._telnet_connect_password = 'Password:'\n self._telnet_connect_authentication_fail_prompt = [':', '%']\n self.cmd_enable = 'enable'\n self.cmd_disable_paging = 'terminal length 0'\n self.cmd_enter_config_mode = 'configure terminal'\n self.cmd_exit_config_mode = 'exit'\n self.cmd_get_version = 'show version'\n self.cmd_get_hostname = 'show version | include uptime'\n self.cmd_get_model = 'show inventory'\n self.cmd_get_serial_number = 'show inventory | i SN'\n self.cmd_get_config = 'show running-config'\n self.cmd_save_config = 'write memory'\n self.cmd_get_interfaces = [\n 'interface ethernet print terse without-paging',\n 'foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}'\n , 'interface bridge port print terse without-paging']\n self.cmd_set_interface = ['interface ethernet enable <INTERFACE>',\n 'interface ethernet disable <INTERFACE>',\n 'interface ethernet comment <INTERFACE> \"<COMMENT>\"',\n 'interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>',\n 'interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]'\n ]\n self.cmd_get_mac_address_table = (\n 'interface bridge host print without-paging')\n self.cmd_get_arp = 'ip arp print terse without-paging'\n self.cmd_get_lldp_neighbors = 'ip neighbor print terse without-paging'\n self.cmd_get_vlans = 'interface bridge vlan print terse without-paging'\n self.cmd_add_vlan = (\n 'interface bridge vlan add vlan-ids=<VLAN> comment=\"<VLAN_NAME>\" bridge=<BRIDGE>'\n )\n self.cmd_remove_vlan = (\n 'interface bridge vlan remove [find vlan-ids=<VLAN>]')\n self.cmd_add_interface_to_vlan = ['interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find 
vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_remove_interface_from_vlan = [\n 'interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_get_routing_table = 'ip route print without-paging terse'\n self.cmd_get_interfaces_ip = 'ip address print terse without-paging'\n self.cmd_add_static_route = (\n 'ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>'\n )\n self.cmd_remove_static_route = (\n 'ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]')\n log.debug('__init__: kwargs: ' + str(kwargs))\n if 'ip' in kwargs:\n self.ip = kwargs['ip']\n log.info('__init__: ip found: ' + str(self.ip))\n if 'username' in kwargs:\n self.username = kwargs['username']\n log.info('__init__: username found: ' + str(self.username))\n if 'password' in kwargs:\n self.password = kwargs['password']\n log.debug('__init__: password found: ' + str(self.password))\n if 'device_type' in kwargs:\n self.device_type = kwargs['device_type']\n log.info('__init__: device_type found: ' + str(self.device_type))\n if 'timeout' in kwargs:\n self.timeout = kwargs['timeout']\n log.info('__init__: timeout found: ' + str(self.timeout))\n if 'protocol' in kwargs:\n self._protocol = kwargs['protocol'].lower()\n log.info('__init__: protocol found: ' + str(self._protocol))\n if self._protocol.lower() == 'telnet':\n self.port = 23\n if 'port' in kwargs:\n self.port = kwargs['port']\n log.info('__init__: port found: ' + str(self.port))\n if 'enable_mode' in kwargs:\n self.enable_mode = kwargs['enable_mode']\n log.info('__init__: enable_mode found: ' + str(self.enable_mode))\n if 'enable_password' in kwargs:\n self.enable_password = kwargs['enable_password']\n log.info('__init__: enable_password found: ' + str(self.\n enable_password))\n\n async def __aenter__(self):\n \"\"\"\n Context manager opening connection\n \"\"\"\n try:\n await self.connect()\n except Exception:\n await self.disconnect()\n raise\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Context manager closing connection\n \"\"\"\n await self.disconnect()\n\n def find_prompt(self, text):\n \"\"\"\n Method used to find a prompt inside an output string\n\n This method is used during the first communication with the device.\n First it find the prompt then caculate the different forms the prompt\n can take. This will be useful later on while finding prompt in other\n output stream (read).\n\n :param text: data with a prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt = text.split('\\n')[-1]\n prompt = text.split('\\r')[-1]\n log.info(f\"find_prompt: prompt: '{prompt}'\")\n self.possible_prompts = self.get_possible_prompts(prompt)\n return prompt\n\n def get_possible_prompts(self, prompt):\n \"\"\"\n Method used to check if a prompt has one of the expected endings then\n create a list with all possible prompts for the device\n\n :param prompt: a prompt with a possible ending prompt (eg. 
\"switch#\")\n :type prompt: str\n\n :return: the list of prompts\n :rtype: list\n \"\"\"\n list_of_prompts = []\n list_of_possible_ending_prompts = self.list_of_possible_ending_prompts\n my_prompt = prompt\n for ending in list_of_possible_ending_prompts:\n if my_prompt.endswith(ending):\n my_prompt = my_prompt[:-len(ending)]\n break\n log.info(f\"get_possible_prompts: prompt found: '{my_prompt}'\")\n log.info(f\"get_possible_prompts: prompt found size: '{len(my_prompt)}'\"\n )\n for ending in list_of_possible_ending_prompts:\n list_of_prompts.append(my_prompt + ending)\n log.info(\n f'get_possible_prompts: list of possible prompts: {list_of_prompts}'\n )\n return list_of_prompts\n\n def check_if_prompt_is_found(self, text):\n \"\"\"\n Method used to check if a prompt is detected inside a string\n\n :param text: a string with prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt_found = False\n for prompt in self.possible_prompts:\n log.info(f\"check_if_prompt_is_found: prompt: '{prompt}'\")\n if prompt in text:\n prompt_found = True\n log.info(f\"check_if_prompt_is_found: prompt found: '{prompt}'\")\n break\n return prompt_found\n\n def remove_command_in_output(self, text, cmd):\n \"\"\"\n Method removing the command at the beginning of a string\n\n After sending commands an \"echo\" of the command sent\n is display in the output string. This method removes it.\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :param cmd: the command previously sent\n :type cmd: str\n\n :return: the output string without the command\n :rtype: str\n \"\"\"\n log.info(f\"remove_command_in_output: cmd = '{cmd}'\")\n log.info(\n f\"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'\")\n output = text.split(cmd + '\\n')[-1]\n log.info(f\"remove_command_in_output: output = '{output}'\")\n return output\n\n def remove_starting_carriage_return_in_output(self, text):\n \"\"\"\n Method removing the carriage return at the beginning of a string\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :return: the output string without the starting carriage return\n :rtype: str\n \"\"\"\n log.info('remove_starting_carriage_return_in_output')\n output = text.lstrip('\\r\\n\\r')\n log.info(\n f\"remove_starting_carriage_return_in_output: output = '{output}'\")\n return output\n\n def remove_ending_prompt_in_output(self, text):\n \"\"\"\n Method removing the prompt at the end of a string\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n log.info('remove_ending_prompt_in_output')\n for prompt in self.possible_prompts:\n log.info(f\"remove_ending_prompt_in_output: prompt: '{prompt}'\")\n if prompt in text:\n text = text[:-len(prompt)]\n text = text.rstrip('\\r\\n')\n break\n log.info(\n f\"remove_ending_prompt_in_output: text without prompt:\\n'{text}'\")\n return text\n\n def check_error_output(self, output):\n \"\"\"\n Check if an error is returned by the device (\"% Unrecognized command\", \"% Ambiguous command\", etc.)\n\n If an error is found, then an exception is raised\n \"\"\"\n log.info('check_error_output')\n if output:\n log.info('check_error_output: output has some data')\n for element in self._send_command_error_in_returned_output:\n log.info(f'check_error_output: element: {element}')\n log.info(f'check_error_output: output[0]: {output[0]}')\n if output.startswith(element):\n raise Exception(output)\n\n def 
remove_ansi_escape_sequence(self, text):\n \"\"\"\n Method removing ANSI escape sequence from a string\n Just CSI sequences are removed\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n output = ''\n esc_found = 0\n for i in text:\n if esc_found == 0:\n if i == '\\x1b':\n log.info('Esc!')\n esc_found = 1\n else:\n output += i\n elif esc_found == 1:\n if i == '[':\n log.info('CSI sequence')\n esc_found = 2\n else:\n output += '\\x1b' + i\n esc_found = 0\n elif i >= 'a' and i <= 'z' or i >= 'A' and i <= 'Z':\n log.info('End of escape sequence')\n esc_found = 0\n return output\n\n async def disable_paging(self):\n \"\"\"\n Async method disabling paging on a device\n\n Use the \"cmd_disable_paging\" attribute\n \"\"\"\n log.info('disable_paging')\n await self.send_command(self.cmd_disable_paging)\n\n async def connect(self):\n \"\"\"\n Async method used for connecting a device\n\n Currently supported: SSH and Telnet\n \"\"\"\n log.info('connect')\n try:\n if self._protocol == 'ssh':\n await self.connectSSH()\n elif self._protocol == 'telnet':\n await self.connectTelnet()\n else:\n raise Exception(\n f'connect: unsupported protocol: {self._protocol}')\n except Exception:\n log.info('connect: connection error')\n raise\n\n async def connectSSH(self):\n \"\"\"\n Async method used for connecting a device using SSH protocol\n \"\"\"\n log.info('connectSSH')\n generator = asyncssh.connect(self.ip, username=self.username,\n password=self.password, known_hosts=None, encryption_algs=[algs\n .decode('utf-8') for algs in asyncssh.encryption._enc_algs])\n try:\n self.conn = await asyncio.wait_for(generator, timeout=self.timeout)\n except asyncio.exceptions.TimeoutError as error:\n log.error(\n f\"connectSSH: connection failed: {self.ip} timeout: '{error}'\")\n raise asyncio.exceptions.TimeoutError(\n 'Connection failed: connection timed out.')\n except Exception as error:\n log.error(f\"connectSSH: connection failed: {self.ip} '{error}'\")\n raise\n log.info('connectSSH: connection success')\n self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type\n ='netscud')\n log.info('connectSSH: open_session success')\n data = ''\n prompt_not_found = True\n try:\n while prompt_not_found:\n log.info('connectSSH: beginning of the loop')\n data += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f\"connectSSH: data: '{str(data)}'\")\n log.info(\n f\"connectSSH: data: hex:'{data.encode('utf-8').hex()}'\")\n for prompt in self._connect_first_ending_prompt:\n if data.endswith(prompt):\n log.info(\n f\"connectSSH: first ending prompt found: '{prompt}'\"\n )\n prompt_not_found = False\n break\n log.info('connectSSH: end of loop')\n except Exception as error:\n log.error(\n f\"connectSSH: timeout while reading the prompt: {self.ip} '{error}'\"\n )\n raise\n log.info(f'connectSSH: end of prompt loop')\n data = self.remove_ansi_escape_sequence(data)\n self.prompt = self.find_prompt(str(data))\n log.info(f\"connectSSH: prompt found: '{self.prompt}'\")\n log.info(f\"connectSSH: prompt found size: '{len(self.prompt)}'\")\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def connectTelnet(self):\n \"\"\"\n Async method used for connecting a device using Telnet protocol\n \"\"\"\n log.info('connectTelnet')\n try:\n conn = asyncio.open_connection(self.ip, self.port)\n except Exception as error:\n log.error(\n f\"connectTelnet: preparation to the 
connection failed: '{error}'\"\n )\n raise\n log.info('connectTelnet: preparation to the connection success')\n try:\n self._reader, self._writer = await asyncio.wait_for(conn,\n timeout=self.timeout)\n except asyncio.TimeoutError:\n log.error('connectTelnet: connection: timeout')\n raise\n log.info('connectTelnet: connection success')\n prompt = self._telnet_connect_login\n prompt_password = self._telnet_connect_password\n use_login = True\n output = ''\n byte_data = b''\n while True:\n log.info(f'connectTelnet: read data for prompt')\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f'connectTelnet: byte_data: {byte_data}')\n output = str(byte_data)\n log.info(f'connectTelnet: output: {output}')\n if prompt in output:\n break\n elif prompt_password in output:\n use_login = False\n break\n log.info(f\"connectTelnet: login prompt: '{output}'\")\n if use_login:\n log.info('connectTelnet: sending login')\n try:\n await self.send_command(self.username, prompt_password)\n log.info('connectTelnet: login sent')\n except Exception:\n raise\n log.info('connectTelnet: sending password')\n try:\n output = await self.telnet_send_command_with_unexpected_pattern(\n self.password, self._connect_first_ending_prompt, self.\n _telnet_connect_authentication_fail_prompt)\n except Exception:\n raise\n log.info('connectTelnet: password sent')\n self.prompt = self.find_prompt(str(output))\n log.info(f\"connectTelnet: prompt found: '{self.prompt}'\")\n if self.enable_mode:\n log.info('connectTelnet: enable mode to be activated')\n try:\n await self.send_command(self.cmd_enable, prompt_password)\n log.info('connectTelnet: enable command sent')\n log.info('connectTelnet: sending enable password')\n await self.telnet_send_command_with_unexpected_pattern(self\n .enable_password, self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt)\n log.info('connectTelnet: enable password sent')\n except Exception:\n log.info('connectTelnet: enable password failure')\n raise\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def disconnect(self):\n \"\"\"\n Async method used to disconnect a device\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnect')\n if self._protocol == 'ssh':\n await self.disconnectSSH()\n elif self._protocol == 'telnet':\n await self.disconnectTelnet()\n else:\n raise Exception(f'Unsupported protocol: {self._protocol}')\n\n async def disconnectSSH(self):\n \"\"\"\n Async method used to disconnect a device in SSH\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectSSH')\n if self.conn:\n self.conn.close()\n self.conn = None\n\n async def disconnectTelnet(self):\n \"\"\"\n Async method used to disconnect a device in Telnet\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectTelnet')\n if self._writer:\n self._writer.close()\n self._writer = None\n\n async def send_command(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_command')\n if timeout is None:\n timeout = self.timeout\n if self._protocol == 'ssh':\n output = await self.send_commandSSH(cmd, pattern=pattern,\n timeout=timeout)\n elif self._protocol == 'telnet':\n output = await self.send_commandTelnet(cmd, pattern=pattern,\n timeout=timeout)\n else:\n raise Exception(\n f'send_command: unsupported protocol: {self._protocol}')\n return output\n\n async def send_commandSSH(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandSSH')\n if timeout is None:\n timeout = self.timeout\n log.info(f\"send_commandSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd + self._carriage_return_for_send_command)\n log.info('send_commandSSH: command sent')\n output = ''\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = self.remove_ansi_escape_sequence(output)\n output = output.replace('\\r', '')\n log.info(f\"send_commandSSH: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_commandSSH: raw output: '{output}'\nsend_commandSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandSSH: cleaned output: '{output}'\nsend_commandSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def send_commandTelnet(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandTelnet')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + '\n'\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_commandTelnet: byte_data: '{byte_data}'\")\n output = str(byte_data)\n log.info(f\"send_commandTelnet: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_commandTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_commandTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_commandTelnet: raw output: '{output}'\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandTelnet: cleaned output: '{output}'\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def telnet_send_command_with_unexpected_pattern(self, cmd,\n pattern, error_pattern=None, timeout=None):\n \"\"\"\n Async method used to send command for Telnet connection to a device with possible unexpected patterns\n\n send_command can wait until timeout if login and password are wrong. This method\n speeds up the returned error message when an authentication failure is identified.\n This method is limited to authentication when a password is required\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a list of patterns located at the very end of a returned string. Can be used\n to define a custom or unexpected prompt at the end of a string\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :param error_pattern: optional, a list of failed prompts found when the login and password are not correct\n :type error_pattern: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('telnet_send_command_with_unexpected_pattern')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + self._carriage_return_for_send_command\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n pattern_not_found = True\n try:\n while pattern_not_found:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'\"\n )\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'\"\n )\n output = str(byte_data)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: output: '{output}'\"\n )\n if pattern:\n for prompt in pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'\"\n )\n if prompt in output:\n pattern_not_found = False\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'\"\n )\n break\n if error_pattern and pattern_not_found:\n for bad_prompt in error_pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'\"\n )\n if bad_prompt in output:\n log.error(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n raise Exception(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n except asyncio.TimeoutError:\n await self.disconnect()\n log.error(\n 'telnet_send_command_with_unexpected_pattern: reading prompt: timeout'\n )\n raise\n except Exception as error:\n await self.disconnect()\n log.error(\n f'telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}'\n )\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n return output\n\n async def send_config_set(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_set')\n if timeout is None:\n timeout = self.timeout\n log.info('send_command')\n if self._protocol == 'ssh':\n output = await self.send_config_setSSH(cmds, timeout)\n elif self._protocol == 'telnet':\n output = await self.send_config_setTelnet(cmds, timeout)\n else:\n raise Exception(\n f'send_config_set: unsupported protocol: {self._protocol}')\n return output\n\n async def send_config_setSSH(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setSSH')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list'\n )\n return returned_output\n log.info('send_config_set: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: configuration mode entered')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command sent')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = 
cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command to leave configuration mode sent'\n )\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def send_config_setTelnet(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setTelnet')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setTelnet: parameter cmds used in send_config_set is neither a string or a list'\n )\n return returned_output\n log.info('send_config_setTelnet: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: configuration mode entered')\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: command sent')\n output = ''\n byte_data = b''\n try:\n 
while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info(\n 'send_config_setTelnet: command to leave configuration mode sent')\n output = ''\n byte_data = b''\n loop = 3\n try:\n while loop:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n await asyncio.sleep(0.5)\n if self.check_if_prompt_is_found(output):\n break\n loop -= 1\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def get_version(self):\n \"\"\"\n Async method used to get the version of the software of the device\n\n :return: Version of the software of the device\n :rtype: str\n \"\"\"\n log.info('get_version')\n version = ''\n output = await self.send_command(self.cmd_get_version)\n version = output.split('Version ')[1].split(',')[0]\n log.info(f'get_version: version: {version}')\n return version\n\n async def get_hostname(self):\n \"\"\"\n Async method used to get the name of the device\n\n :return: Name of the device\n :rtype: str\n \"\"\"\n log.info('get_hostname')\n output = await self.send_command(self.cmd_get_hostname)\n log.info(f\"get_hostname: output: '{output}'\")\n output = output.split()[0]\n log.info(f\"get_hostname: hostname found: '{output}'\")\n return output\n\n async def get_model(self):\n \"\"\"\n Async method used to get the model of the device\n\n :return: Model of the device\n :rtype: str\n \"\"\"\n log.info('get_model')\n output = await self.send_command(self.cmd_get_model)\n log.info(f\"get_model: output: '{output}'\")\n output = output.split('\"')[3]\n log.info(f\"get_model: model found: '{output}'\")\n return output\n\n async def get_serial_number(self):\n \"\"\"\n Get serial number of the switch or the serial number of the first switch of a stack\n\n :return: Serial number of the device\n :rtype: str\n \"\"\"\n log.info('get_serial_number')\n output = await self.send_command(self.cmd_get_serial_number)\n log.info(f\"get_serial_number: output: '{output}'\")\n output = output.splitlines()[0].split()[-1]\n log.info(f\"get_serial_number: serial number found: '{output}'\")\n return output\n\n async def get_config(self, timeout=None):\n \"\"\"\n Async method used to get the configuration of the device\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: Configuration of the device\n :rtype: str\n \"\"\"\n log.info('get_config')\n if timeout is None:\n timeout = self.timeout\n output = await self.send_command(self.cmd_get_config, timeout=timeout)\n return output\n\n async def save_config(self):\n \"\"\"\n Async method used to save the current configuration on the device\n\n :return: Commands of the configuration saving process\n :rtype: str\n \"\"\"\n log.info('save_config')\n output = await self.send_command(self.cmd_save_config)\n return output\n",
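A minimal usage sketch for the NetworkDevice class built up in the steps above. This is an illustration under stated assumptions, not part of the record: it presumes the class is importable from wherever the code above lives, and the IP address, credentials, and command shown are placeholders.

import asyncio

# Hypothetical import; the actual module path depends on where the class above is defined.
# from my_module import NetworkDevice

async def main():
    params = {
        "ip": "192.0.2.1",      # placeholder device address (TEST-NET-1)
        "username": "admin",    # placeholder credentials
        "password": "secret",
        "protocol": "ssh",      # the class also supports "telnet"
    }

    # The async context manager calls connect() on entry and disconnect() on exit
    async with NetworkDevice(**params) as device:
        # send_command() returns the command output with the echoed
        # command and the ending prompt stripped
        output = await device.send_command("show version")
        print(output)

asyncio.run(main())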
"step-5": "# Python library import\nimport asyncio, asyncssh, logging\n\n# Module logging logger\nlog = logging.getLogger(__package__)\n\n# Debug level\n# logging.basicConfig(level=logging.WARNING)\n# logging.basicConfig(level=logging.INFO)\nlogging.basicConfig(level=logging.DEBUG)\nasyncssh.set_debug_level(2)\n\n\n# Declaration of constant values\n\n# Max data to read in read function\nMAX_BUFFER_DATA = 65535\n\n\n# Dictonary with all netmasks of IPv4\nipv4_netmask_list = {\n \"0.0.0.0\": \"0\",\n \"128.0.0.0\": \"1\",\n \"192.0.0.0\": \"2\",\n \"224.0.0.0\": \"3\",\n \"240.0.0.0\": \"4\",\n \"248.0.0.0\": \"5\",\n \"252.0.0.0\": \"6\",\n \"254.0.0.0\": \"7\",\n \"255.0.0.0\": \"8\",\n \"255.128.0.0\": \"9\",\n \"255.192.0.0\": \"10\",\n \"255.224.0.0\": \"11\",\n \"255.240.0.0\": \"12\",\n \"255.248.0.0\": \"13\",\n \"255.252.0.0\": \"14\",\n \"255.254.0.0\": \"15\",\n \"255.255.0.0\": \"16\",\n \"255.255.128.0\": \"17\",\n \"255.255.192.0\": \"18\",\n \"255.255.224.0\": \"19\",\n \"255.255.240.0\": \"20\",\n \"255.255.248.0\": \"21\",\n \"255.255.252.0\": \"22\",\n \"255.255.254.0\": \"23\",\n \"255.255.255.0\": \"24\",\n \"255.255.255.128\": \"25\",\n \"255.255.255.192\": \"26\",\n \"255.255.255.224\": \"27\",\n \"255.255.255.240\": \"28\",\n \"255.255.255.248\": \"29\",\n \"255.255.255.252\": \"30\",\n \"255.255.255.254\": \"31\",\n \"255.255.255.255\": \"32\",\n}\n\n\nclass NetworkDevice:\n \"\"\"\n Base class for network object\n\n\n :param ip: IP address of a device\n :type ip: str\n\n :param username: Username used to connect to a device\n :type username: str\n\n :param password: Password used to connect to a device\n :type password: str\n\n :param device_type: Type of device used\n :type device_type: str\n\n :param port: TCP port used to connect a device. Default value is \"22\" for SSH\n :type port: int, optional\n\n :param timeout: TCP port used to connect a device. Default value is 10 seconds\n :type timeout: int, optional\n\n :param _protocol: Protocol used to connect a device. \"ssh\" or \"telnet\" are possible options. Default value is \"ssh\"\n :type _protocol: str, optional\n\n :param enable_mode: Enable mode for devices requiring it. Default value is \"False\"\n :type enable_mode: bool, optional\n\n :param enable_password: Enable password used for enable mode.\n :type enable_password: str, optional\n\n :param conn: Variable used for the management of the SSH connection\n :type conn: SSHClientConnection object\n\n :param _writer: Variable used for the management of the Telnet connection and writing channel\n :type _writer: StreamWriter object\n\n :param _reader: Variable used for the management of the Telnet reading channel\n :type _reader: StreamReader object\n\n :param possible_prompts: Used by the connect method to list all possible prompts of the device\n :type possible_prompts: list\n\n :param _connect_first_ending_prompt: Default possible ending prompts. Used only the time after login and password to discover the prompt\n :type _connect_first_ending_prompt: list\n\n :param list_of_possible_ending_prompts: Different strings at the end of a prompt the device can get. Used for detecting the prompt returned in sent commands\n :type list_of_possible_ending_prompts: list\n\n :param _telnet_connect_login: Login prompt for Telnet. Used to detect when a login is expected or when login and password access is failed\n :type _telnet_connect_login: str\n\n :param _telnet_connect_password: Password prompt for Telnet. 
Used to detect when a login is expected or when login and password access is failed\n :type _telnet_connect_password: list\n\n :param _telnet_connect_authentication_fail_prompt: Known failing messages or prompts when an authentication has failed. Used to get an answer faster than timeout events\n :type _telnet_connect_authentication_fail_prompt: list\n\n :param cmd_enable: Enable command for entering into enable mode\n :type cmd_enable: str\n\n :param cmd_disable_paging: Command used to disable paging on a device. That command is run at connection time\n :type cmd_disable_paging: str\n\n :param cmd_enter_config_mode: Command used to enter into a configuration mode on a device when this device support that feature.\n :type cmd_enter_config_mode: str\n\n :param cmd_exit_config_mode: Command used to leave a configuration mode on a device when this device support that feature.\n :type cmd_exit_config_mode: str\n\n :param cmd_get_version: API command used to get the software version of a device\n :type cmd_get_version: str\n\n :param cmd_get_hostname: API command used to get the hostname of a device\n :type cmd_get_hostname: str\n\n :param cmd_get_model: API command used to get the model of a device\n :type cmd_get_model: str\n\n :param cmd_get_serial_number: API command used to get the serial number of a device\n :type cmd_get_serial_number: str\n\n :param cmd_get_config: API command used to get the running configuration of a device\n :type cmd_get_config: str\n\n :param cmd_save_config: API command used to save the running configuration on the device\n :type cmd_save_config: str\n \"\"\"\n\n def __init__(self, **kwargs):\n\n # Display info message\n log.info(\"__init__\")\n\n self.ip = \"\"\n self.username = \"\"\n self.password = \"\"\n self.device_type = \"\"\n self.port = 22\n self.timeout = 10\n self._protocol = \"ssh\"\n self.enable_mode = False\n self.enable_password = \"\"\n self.conn = None\n self._writer = None\n self._reader = None\n self.possible_prompts = []\n self._connect_first_ending_prompt = [\"#\", \">\"]\n self.list_of_possible_ending_prompts = [\n \"(config-line)#\",\n \"(config-if)#\",\n \"(config)#\",\n \">\",\n \"#\",\n ]\n self._carriage_return_for_send_command = \"\\n\"\n self._send_command_error_in_returned_output = []\n self._telnet_connect_login = \"Username:\"\n self._telnet_connect_password = \"Password:\"\n self._telnet_connect_authentication_fail_prompt = [\":\", \"%\"]\n\n # General commands\n self.cmd_enable = \"enable\"\n self.cmd_disable_paging = \"terminal length 0\"\n self.cmd_enter_config_mode = \"configure terminal\"\n self.cmd_exit_config_mode = \"exit\"\n self.cmd_get_version = \"show version\"\n self.cmd_get_hostname = \"show version | include uptime\"\n self.cmd_get_model = \"show inventory\"\n self.cmd_get_serial_number = \"show inventory | i SN\"\n self.cmd_get_config = \"show running-config\"\n self.cmd_save_config = \"write memory\"\n\n # Layer 1 commands\n self.cmd_get_interfaces = [\n \"interface ethernet print terse without-paging\",\n \"foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}\",\n \"interface bridge port print terse without-paging\",\n ]\n self.cmd_set_interface = [\n \"interface ethernet enable <INTERFACE>\",\n \"interface ethernet disable <INTERFACE>\",\n 'interface ethernet comment <INTERFACE> \"<COMMENT>\"',\n \"interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>\",\n \"interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find 
interface=<INTERFACE>]\",\n ]\n\n # Layer 2 commands\n self.cmd_get_mac_address_table = \"interface bridge host print without-paging\"\n self.cmd_get_arp = \"ip arp print terse without-paging\"\n self.cmd_get_lldp_neighbors = \"ip neighbor print terse without-paging\"\n self.cmd_get_vlans = \"interface bridge vlan print terse without-paging\"\n self.cmd_add_vlan = 'interface bridge vlan add vlan-ids=<VLAN> comment=\"<VLAN_NAME>\" bridge=<BRIDGE>'\n self.cmd_remove_vlan = \"interface bridge vlan remove [find vlan-ids=<VLAN>]\"\n self.cmd_add_interface_to_vlan = [\n \"interface bridge vlan print terse\",\n \"interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>\",\n \"interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>\",\n \"interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>\",\n ]\n self.cmd_remove_interface_from_vlan = [\n \"interface bridge vlan print terse\",\n \"interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>\",\n \"interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>\",\n \"interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>\",\n ]\n\n # Layer 3 commands\n self.cmd_get_routing_table = \"ip route print without-paging terse\"\n self.cmd_get_interfaces_ip = \"ip address print terse without-paging\"\n self.cmd_add_static_route = \"ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>\"\n self.cmd_remove_static_route = (\n \"ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]\"\n )\n\n # Display info message\n log.debug(\"__init__: kwargs: \" + str(kwargs))\n\n # Get information from dictionary\n\n # \"ip\" found?\n if \"ip\" in kwargs:\n\n # Save \"ip\" parameter\n self.ip = kwargs[\"ip\"]\n\n # Display info message\n log.info(\"__init__: ip found: \" + str(self.ip))\n\n # \"username\" found?\n if \"username\" in kwargs:\n self.username = kwargs[\"username\"]\n\n # Display info message\n log.info(\"__init__: username found: \" + str(self.username))\n\n # \"password\" found?\n if \"password\" in kwargs:\n self.password = kwargs[\"password\"]\n\n # Display info message\n log.debug(\"__init__: password found: \" + str(self.password))\n\n # \"device_type\" found?\n if \"device_type\" in kwargs:\n self.device_type = kwargs[\"device_type\"]\n\n # Display info message\n log.info(\"__init__: device_type found: \" + str(self.device_type))\n\n # \"timeout\" found?\n if \"timeout\" in kwargs:\n self.timeout = kwargs[\"timeout\"]\n\n # Display info message\n log.info(\"__init__: timeout found: \" + str(self.timeout))\n\n # \"protocol\" found?\n if \"protocol\" in kwargs:\n self._protocol = kwargs[\"protocol\"].lower()\n\n # Display info message\n log.info(\"__init__: protocol found: \" + str(self._protocol))\n\n # By default telnet port is 23\n if self._protocol.lower() == \"telnet\":\n self.port = 23\n\n # \"port\" found?\n if \"port\" in kwargs:\n self.port = kwargs[\"port\"]\n\n # Display info message\n log.info(\"__init__: port found: \" + str(self.port))\n\n # \"enable_mode\" found?\n if \"enable_mode\" in kwargs:\n self.enable_mode = kwargs[\"enable_mode\"]\n\n # Display info message\n log.info(\"__init__: enable_mode found: \" + str(self.enable_mode))\n\n # \"enable_password\" found?\n if \"enable_password\" in kwargs:\n self.enable_password = kwargs[\"enable_password\"]\n\n # Display info message\n log.info(\"__init__: enable_password found: \" + str(self.enable_password))\n\n async def __aenter__(self):\n \"\"\"\n Context manager opening connection\n 
\"\"\"\n\n try:\n # Run an async method to connect a device\n await self.connect()\n\n except Exception:\n\n # Disconnection (if needed) in case the connection is done but something failed\n await self.disconnect()\n\n # propagate exception if needed\n raise\n\n return self\n\n # async def _aexit_(self, exc_type, exc_value, traceback):\n async def __aexit__(self, exc_type, exc_value, traceback):\n\n \"\"\"\n Context manager closing connection\n \"\"\"\n\n # Close the connection\n await self.disconnect()\n\n def find_prompt(self, text):\n \"\"\"\n Method used to find a prompt inside an output string\n\n This method is used during the first communication with the device.\n First it find the prompt then caculate the different forms the prompt\n can take. This will be useful later on while finding prompt in other\n output stream (read).\n\n :param text: data with a prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n\n # Get last line of the data\n prompt = text.split(\"\\n\")[-1]\n\n # Remove possible \\r in the data\n # prompt = prompt.replace(\"\\r\", \"\")\n prompt = text.split(\"\\r\")[-1]\n\n # Display info message\n log.info(f\"find_prompt: prompt: '{prompt}'\")\n\n # Get the possible prompts for future recognition\n self.possible_prompts = self.get_possible_prompts(prompt)\n\n # Return the prompt\n return prompt\n\n def get_possible_prompts(self, prompt):\n \"\"\"\n Method used to check if a prompt has one of the expected endings then\n create a list with all possible prompts for the device\n\n :param prompt: a prompt with a possible ending prompt (eg. \"switch#\")\n :type prompt: str\n\n :return: the list of prompts\n :rtype: list\n \"\"\"\n\n # By default no prompts are returned\n list_of_prompts = []\n\n # Get all the ppossible values of the endings of the prompt\n list_of_possible_ending_prompts = self.list_of_possible_ending_prompts\n\n # Temporary variable storing the prompt value\n my_prompt = prompt\n\n # Test each possible prompt ending (i.e '#', '>', \"(config-if)#\", \"(config)#\")\n for ending in list_of_possible_ending_prompts:\n\n # Is this current prompt ending at the end of the prompt?\n if my_prompt.endswith(ending):\n\n # Yes\n\n # Then remove the ending\n my_prompt = my_prompt[: -len(ending)]\n\n # Break the loop\n break\n\n # Prompt should be from \"switch#\" to \"switch\"\n\n # Display info message\n log.info(f\"get_possible_prompts: prompt found: '{my_prompt}'\")\n\n # Display info message\n log.info(f\"get_possible_prompts: prompt found size: '{len(my_prompt)}'\")\n\n # Now create all the possible prompts for that device\n for ending in list_of_possible_ending_prompts:\n\n # Save the prompt name with a possible ending in the list\n list_of_prompts.append(my_prompt + ending)\n\n # Display info message\n log.info(f\"get_possible_prompts: list of possible prompts: {list_of_prompts}\")\n\n # Return the list of prompts\n return list_of_prompts\n\n def check_if_prompt_is_found(self, text):\n \"\"\"\n Method used to check if a prompt is detected inside a string\n\n :param text: a string with prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n\n # By default the prompt is not found\n prompt_found = False\n\n # Check all possible prompts\n for prompt in self.possible_prompts:\n\n # Display info message\n log.info(f\"check_if_prompt_is_found: prompt: '{prompt}'\")\n\n # Is this prompt present in the text?\n if prompt in text:\n\n # Yes\n prompt_found = True\n\n # Display info message\n 
log.info(f\"check_if_prompt_is_found: prompt found: '{prompt}'\")\n\n # Leave the for loop\n break\n\n # Return the prompt found\n return prompt_found\n\n def remove_command_in_output(self, text, cmd):\n \"\"\"\n Method removing the command at the beginning of a string\n\n After sending commands an \"echo\" of the command sent\n is display in the output string. This method removes it.\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :param cmd: the command previously sent\n :type cmd: str\n\n :return: the output string without the command\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(f\"remove_command_in_output: cmd = '{cmd}'\")\n\n # Display info message\n log.info(f\"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'\")\n\n # Remove the command from the beginning of the output\n # output = text.lstrip(cmd + \"\\n\")\n output = text.split(cmd + \"\\n\")[-1]\n\n # Display info message\n log.info(f\"remove_command_in_output: output = '{output}'\")\n\n # Return the string without the command\n return output\n\n def remove_starting_carriage_return_in_output(self, text):\n\n \"\"\"\n Method removing the carriage return at the beginning of a string\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :return: the output string without the starting carriage return\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"remove_starting_carriage_return_in_output\")\n\n # Remove the carriage return at the beginning of the string\n output = text.lstrip(\"\\r\\n\\r\")\n\n # Display info message\n log.info(f\"remove_starting_carriage_return_in_output: output = '{output}'\")\n\n # Return the string without the starting carriage return\n return output\n\n def remove_ending_prompt_in_output(self, text):\n\n \"\"\"\n Method removing the prompt at the end of a string\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"remove_ending_prompt_in_output\")\n\n # Check all possible prompts\n for prompt in self.possible_prompts:\n\n # Display info message\n log.info(f\"remove_ending_prompt_in_output: prompt: '{prompt}'\")\n\n # Prompt found in the text?\n if prompt in text:\n\n # Yes\n\n # Then it is removed from the text\n # text = text.rstrip(prompt)\n text = text[: -len(prompt)]\n\n # Remove also carriage return\n text = text.rstrip(\"\\r\\n\")\n\n # Leave the loop\n break\n\n # output = text.rstrip(\"\\r\\n\" + self.prompt)\n\n # Display info message\n log.info(f\"remove_ending_prompt_in_output: text without prompt:\\n'{text}'\")\n\n # Return the text without prompt at the end\n return text\n\n def check_error_output(self, output):\n \"\"\"\n Check if an error is returned by the device (\"% Unrecognized command\", \"% Ambiguous command\", etc.)\n\n If an error is found, then an exception is raised\n \"\"\"\n\n # Display info message\n log.info(\"check_error_output\")\n\n # Check if output has some data\n if output:\n\n # Yes\n\n # Display info message\n log.info(\"check_error_output: output has some data\")\n\n # Check all elements in the list of output\n for element in self._send_command_error_in_returned_output:\n\n # Display info message\n log.info(f\"check_error_output: element: {element}\")\n\n # Display info message\n log.info(f\"check_error_output: output[0]: {output[0]}\")\n\n # Check if the output starts with a string with an error message (like \"% Invalid input 
detected at '^' marker.\")\n\n # Error message?\n if output.startswith(element):\n\n # Yes\n\n # Raise an exception\n raise Exception(output)\n\n def remove_ansi_escape_sequence(self, text):\n\n \"\"\"\n Method removing ANSI escape sequence from a string\n Just CSI sequences are removed\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n\n # By default no string returned\n output = \"\"\n\n # By default no escape sequence found\n esc_found = 0\n\n # Read char by char a string\n for i in text:\n\n # Display char\n # log.info(f\"{str(i).encode('ascii')}\")\n\n # No escape previously found?\n if esc_found == 0:\n\n # No escape sequence currently found\n\n # Escape?\n if i == \"\\x1b\":\n\n # Yes\n log.info(\"Esc!\")\n\n # Escape found\n esc_found = 1\n\n else:\n\n # No\n\n # Then the current char can be saved\n output += i\n\n # Escape previously found?\n elif esc_found == 1:\n\n # Yes\n\n # Then check if this is a CSI sequence\n if i == \"[\":\n\n # Beginning of CSI sequence\n log.info(\"CSI sequence\")\n\n # CSI sequence\n esc_found = 2\n\n else:\n\n # Another Escape sequence\n\n # Keep the escape sequence in the string\n output += \"\\x1b\" + i\n\n # No escape sequence next\n esc_found = 0\n\n else:\n\n # Char between 'a' and 'z' or 'A' and 'Z'?\n if (i >= \"a\" and i <= \"z\") or (i >= \"A\" and i <= \"Z\"):\n\n # Yes\n\n # Then it is the end of CSI escape sequence\n log.info(\"End of escape sequence\")\n\n # No escape sequence next\n esc_found = 0\n\n # Return a string without ANSI escape sequence\n return output\n\n async def disable_paging(self):\n \"\"\"\n Async method disabling paging on a device\n\n Use the \"cmd_disable_paging\" attribute\n \"\"\"\n\n # Display info message\n log.info(\"disable_paging\")\n\n # Send command to the device to disable paging\n await self.send_command(self.cmd_disable_paging)\n\n async def connect(self):\n \"\"\"\n Async method used for connecting a device\n\n Currently supported: SSH and Telnet\n \"\"\"\n\n # Display info message\n log.info(\"connect\")\n\n try:\n\n # SSH?\n if self._protocol == \"ssh\":\n\n # Yes\n\n # Then Connect using SSH\n await self.connectSSH()\n\n # Telnet?\n elif self._protocol == \"telnet\":\n\n # Yes\n\n # Then Connect using Telnet\n await self.connectTelnet()\n\n else:\n\n # Unsupported protocol\n\n # Raise an exception\n raise Exception(f\"connect: unsupported protocol: {self._protocol}\")\n\n except Exception:\n\n # There was a problem with a connection method\n\n # Display info message\n log.info(\"connect: connection error\")\n\n raise\n\n async def connectSSH(self):\n \"\"\"\n Async method used for connecting a device using SSH protocol\n \"\"\"\n\n # Display info message\n log.info(\"connectSSH\")\n\n # Parameters of the connection\n generator = asyncssh.connect(\n self.ip,\n username=self.username,\n password=self.password,\n known_hosts=None,\n # encryption_algs=\"*\", # Parameter that includes all encryption algorithms (even the old ones disabled by default)\n encryption_algs=[\n algs.decode(\"utf-8\") for algs in asyncssh.encryption._enc_algs\n ], # Parameter that includes all encryption algorithms (even the old ones disabled by default)\n )\n\n # Trying to connect to the device\n try:\n\n self.conn = await asyncio.wait_for(generator, timeout=self.timeout)\n\n except asyncio.exceptions.TimeoutError as error:\n\n # Timeout\n\n # Display error message\n log.error(f\"connectSSH: connection failed: {self.ip} 
timeout: '{error}'\")\n\n # Exception propagation\n raise asyncio.exceptions.TimeoutError(\n \"Connection failed: connection timed out.\"\n )\n\n except Exception as error:\n\n # Connection failed\n\n # Display error message\n log.error(f\"connectSSH: connection failed: {self.ip} '{error}'\")\n\n # Exception propagation\n raise\n\n # Display info message\n log.info(\"connectSSH: connection success\")\n\n # Create a session\n self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type=\"netscud\")\n\n # Display info message\n log.info(\"connectSSH: open_session success\")\n\n # By default no data has been read\n data = \"\"\n\n # By default no prompt found\n prompt_not_found = True\n\n try:\n\n # Read data\n while prompt_not_found:\n\n # Display info message\n log.info(\"connectSSH: beginning of the loop\")\n\n # Read the prompt\n data += await asyncio.wait_for(\n self.stdoutx.read(MAX_BUFFER_DATA), timeout=self.timeout\n )\n\n # Display info message\n log.info(f\"connectSSH: data: '{str(data)}'\")\n\n # Display info message\n log.info(f\"connectSSH: data: hex:'{data.encode('utf-8').hex()}'\")\n\n # Check if an initial prompt is found\n for prompt in self._connect_first_ending_prompt:\n\n # Ending prompt found?\n if data.endswith(prompt):\n\n # Yes\n\n # Display info message\n log.info(f\"connectSSH: first ending prompt found: '{prompt}'\")\n\n # A ending prompt has been found\n prompt_not_found = False\n\n # Leave the loop\n break\n\n # Display info message\n log.info(\"connectSSH: end of loop\")\n\n except Exception as error:\n\n # Fail while reading the prompt\n\n # Display error message\n log.error(\n f\"connectSSH: timeout while reading the prompt: {self.ip} '{error}'\"\n )\n\n # Exception propagation\n raise\n\n # Display info message\n log.info(f\"connectSSH: end of prompt loop\")\n\n # Remove possible escape sequence\n data = self.remove_ansi_escape_sequence(data)\n\n # Find prompt\n self.prompt = self.find_prompt(str(data))\n\n # Display info message\n log.info(f\"connectSSH: prompt found: '{self.prompt}'\")\n\n # Display info message\n log.info(f\"connectSSH: prompt found size: '{len(self.prompt)}'\")\n\n # Disable paging command available?\n if self.cmd_disable_paging:\n # Yes\n\n # Disable paging\n await self.disable_paging()\n\n async def connectTelnet(self):\n \"\"\"\n Async method used for connecting a device using Telnet protocol\n \"\"\"\n\n # Display info message\n log.info(\"connectTelnet\")\n\n try:\n\n # Prepare connection with Telnet\n conn = asyncio.open_connection(self.ip, self.port)\n\n except Exception as error:\n\n # Preparation to the connection failed\n\n # Display error message\n log.error(f\"connectTelnet: preparation to the connection failed: '{error}'\")\n\n # Exception propagation\n raise\n\n # Display info message\n log.info(\"connectTelnet: preparation to the connection success\")\n\n try:\n\n # Connection with Telnet\n self._reader, self._writer = await asyncio.wait_for(\n conn, timeout=self.timeout\n )\n\n except asyncio.TimeoutError:\n\n # Time out during connection\n\n # Display error message\n log.error(\"connectTelnet: connection: timeout\")\n\n # Exception propagation\n raise\n\n # Display info message\n log.info(\"connectTelnet: connection success\")\n\n # Get prompt for the login\n prompt = self._telnet_connect_login\n\n # Get prompt for the password\n prompt_password = self._telnet_connect_password\n\n # By default a login is expected\n use_login = True\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n 
byte_data = b\"\"\n\n # Read the telnet information and first prompt (for login but a password prompt can be found for IOS for instance)\n while True:\n\n # Display info message\n log.info(f\"connectTelnet: read data for prompt\")\n\n # Read returned prompt\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=self.timeout\n )\n\n # Display info message\n log.info(f\"connectTelnet: byte_data: {byte_data}\")\n\n # Temporary convertion in string. This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(f\"connectTelnet: output: {output}\")\n\n # Prompt for the username found?\n if prompt in output:\n\n # Yes\n\n # Leave the loop\n break\n\n # Prompt for the password found?\n elif prompt_password in output:\n\n # Yes\n\n # That means only password is required\n use_login = False\n\n # Leave the loop\n break\n\n # Display info message\n log.info(f\"connectTelnet: login prompt: '{output}'\")\n\n # Login to use?\n if use_login:\n\n # Yes\n\n # Display info message\n log.info(\"connectTelnet: sending login\")\n\n try:\n\n # Send login\n await self.send_command(self.username, prompt_password)\n\n # Display info message\n log.info(\"connectTelnet: login sent\")\n\n except Exception:\n\n # Problem with the login\n\n # Propagate the exception\n raise\n\n # Display info message\n log.info(\"connectTelnet: sending password\")\n\n try:\n # Send password\n output = await self.telnet_send_command_with_unexpected_pattern(\n self.password,\n self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt,\n )\n\n except Exception:\n\n # Problem with the password\n\n # Propagate the exception\n raise\n\n # Display info message\n log.info(\"connectTelnet: password sent\")\n\n # Find prompt\n self.prompt = self.find_prompt(str(output))\n\n # Display info message\n log.info(f\"connectTelnet: prompt found: '{self.prompt}'\")\n\n # Password enable?\n if self.enable_mode:\n\n # Yes\n\n # Display info message\n log.info(\"connectTelnet: enable mode to be activated\")\n\n try:\n\n # Send enable command\n await self.send_command(self.cmd_enable, prompt_password)\n\n # Display info message\n log.info(\"connectTelnet: enable command sent\")\n\n # Display info message\n log.info(\"connectTelnet: sending enable password\")\n\n # Send enable password\n await self.telnet_send_command_with_unexpected_pattern(\n self.enable_password,\n self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt,\n )\n\n # Display info message\n log.info(\"connectTelnet: enable password sent\")\n\n except Exception:\n\n # Problem with the enable password\n\n # Display info message\n log.info(\"connectTelnet: enable password failure\")\n\n # Propagate the exception\n raise\n\n # Disable paging command available?\n if self.cmd_disable_paging:\n\n # Yes\n\n # Disable paging\n await self.disable_paging()\n\n async def disconnect(self):\n \"\"\"\n Async method used to disconnect a device\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n\n # Debug info message\n log.info(\"disconnect\")\n\n # SSH?\n if self._protocol == \"ssh\":\n\n # Yes\n\n # Then disconnect using SSH\n await self.disconnectSSH()\n\n # Telnet?\n elif self._protocol == \"telnet\":\n\n # Yes\n\n # Then disconnect using Telnet\n await self.disconnectTelnet()\n\n else:\n\n # Unsupported protocol\n\n # Raise an exception\n raise Exception(f\"Unsupported protocol: {self._protocol}\")\n\n async def 
disconnectSSH(self):\n \"\"\"\n Async method used to disconnect a device in SSH\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n\n # Debug info message\n log.info(\"disconnectSSH\")\n\n # Connection previously open in SSH?\n if self.conn:\n\n # Yes\n\n # Then close the SSH connection\n self.conn.close()\n\n # No more connection to disconnect\n self.conn = None\n\n async def disconnectTelnet(self):\n \"\"\"\n Async method used to disconnect a device in Telnet\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n\n # Debug info message\n log.info(\"disconnectTelnet\")\n\n # Connection previously open in Telnet?\n if self._writer:\n\n # Yes\n\n # Then close the SSH connection\n self._writer.close()\n\n # No more connection to disconnect\n self._writer = None\n\n async def send_command(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n\n # Debug info message\n log.info(\"send_command\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # SSH?\n if self._protocol == \"ssh\":\n\n # Yes\n\n # Then disconnect using SSH\n output = await self.send_commandSSH(cmd, pattern=pattern, timeout=timeout)\n\n # Telnet?\n elif self._protocol == \"telnet\":\n\n # Yes\n\n # Then disconnect using Telnet\n output = await self.send_commandTelnet(\n cmd, pattern=pattern, timeout=timeout\n )\n\n else:\n\n # Unsupported protocol\n\n # Raise an exception\n raise Exception(f\"send_command: unsupported protocol: {self._protocol}\")\n\n # Return the result of the command\n return output\n\n async def send_commandSSH(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n\n # Debug info message\n log.info(\"send_commandSSH\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Add carriage return at the end of the command (mandatory to send the command)\n # cmd = cmd + \"\\n\"\n # cmd = cmd + \"\\r\\n\"\n\n # Debug info message\n log.info(f\"send_commandSSH: cmd = '{cmd}'\")\n\n # Sending command\n self.stdinx.write(cmd + self._carriage_return_for_send_command)\n\n # Display message\n log.info(\"send_commandSSH: command sent\")\n\n # Variable used to gather data\n output = \"\"\n\n # Reading data\n while True:\n\n # await asyncio.sleep(1)\n\n # Read the data received\n output += await asyncio.wait_for(\n self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Debug info message\n # log.info(f\"send_commandSSH: output hex: '{str(output).encode(\"utf-8\").hex()}'\")\n\n # Remove ANSI escape sequence\n output = self.remove_ansi_escape_sequence(output)\n\n # Remove possible \"\\r\"\n output = output.replace(\"\\r\", \"\")\n\n # data = \"\"\n # for i in output:\n # data += i.encode(\"utf-8\").hex()\n\n # print(data)\n\n # Debug info message\n log.info(f\"send_commandSSH: output: '{output}'\")\n\n # Is a patten used?\n if pattern:\n\n # Use pattern instead of prompt\n if pattern in output:\n\n # Yes\n\n # Leave the loop\n break\n\n else:\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n # Debug info message\n log.debug(\n f\"send_commandSSH: raw output: '{output}'\\nsend_commandSSH: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Debug info message\n log.debug(\n f\"send_commandSSH: cleaned output: '{output}'\\nsend_commandSSH: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n # Return the result of the command\n return output\n\n async def send_commandTelnet(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n\n # Debug info message\n log.info(\"send_commandTelnet\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + \"\\n\"\n\n # Sending command\n self._writer.write(cmd.encode())\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n try:\n\n # Read data\n while True:\n\n # Read returned prompt\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(f\"send_commandTelnet: byte_data: '{byte_data}'\")\n\n # Temporary convertion in string. This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(f\"send_commandTelnet: output: '{output}'\")\n\n # Is a patten used?\n if pattern:\n\n # Use pattern instead of prompt\n if pattern in output:\n\n # Yes\n\n # Leave the loop\n break\n\n else:\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n except asyncio.TimeoutError:\n\n # Time out during when reading prompt\n\n # Display error message\n log.error(\"send_commandTelnet: connection: timeout\")\n\n # Exception propagation\n raise\n\n except Exception as error:\n\n # Error during when reading prompt\n\n # Display error message\n log.error(f\"send_commandTelnet: error: {error}\")\n\n # Exception propagation\n raise\n\n # Convert data (bytes) into string\n output = byte_data.decode(\"utf-8\", \"ignore\")\n\n # Debug info message\n log.debug(\n f\"send_commandTelnet: raw output: '{output}'\\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Debug info message\n log.debug(\n f\"send_commandTelnet: cleaned output: '{output}'\\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n # Return the result of the command\n return output\n\n async def telnet_send_command_with_unexpected_pattern(\n self, cmd, pattern, error_pattern=None, timeout=None\n ):\n \"\"\"\n Async method used to send command for Telnet connection to a device with possible unexpected patterns\n\n send_command can wait till time out if login and password are wrong. This method\n speed up the returned error message when authentication failed is identified.\n This method is limited to authentication whem password is required\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a list of patterns located at the very end of the a returned string. Can be used\n to define a custom or unexpected prompt a the end of a string\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :param error_pattern: optional, a list of failed prompts found when the login and password are not correct\n :type error_pattern: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n\n # Debug info message\n log.info(\"telnet_send_command_with_unexpected_pattern\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Sending command\n self._writer.write(cmd.encode())\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n # By default pattern is not found\n pattern_not_found = True\n\n try:\n\n # Read data\n while pattern_not_found:\n\n # Read returned prompt\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'\"\n )\n\n # Display debug message\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'\"\n )\n\n # Temporary convertion in string. This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: output: '{output}'\"\n )\n\n # Is a pattern used?\n if pattern:\n\n # Check all pattern of prompt in the output\n for prompt in pattern:\n\n # Display info message\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'\"\n )\n\n # A pattern found?\n if prompt in output:\n\n # Yes\n\n # A pattern is found. The main loop can be stopped\n pattern_not_found = False\n\n # Display info message\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'\"\n )\n\n # Leave the loop\n break\n\n # Is an unexpected pattern used?\n if error_pattern and pattern_not_found:\n\n # Check all unexpected pattern of prompt in the output\n for bad_prompt in error_pattern:\n\n # Display info message\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'\"\n )\n\n # An error_pattern pattern found?\n if bad_prompt in output:\n\n # Yes\n\n # Display error message\n log.error(\n \"telnet_send_command_with_unexpected_pattern: authentication failed\"\n )\n\n # Raise exception\n raise Exception(\n \"telnet_send_command_with_unexpected_pattern: authentication failed\"\n )\n\n # Leave the loop\n # break\n\n except asyncio.TimeoutError:\n\n # Time out during when reading prompt\n\n # Close the connection in order to not display RuntimeError\n await self.disconnect()\n\n # Display error message\n log.error(\n \"telnet_send_command_with_unexpected_pattern: reading prompt: timeout\"\n )\n\n # Exception propagation\n raise\n\n except Exception as error:\n\n # Error during when reading prompt\n\n # Close the connection in order to not display RuntimeError\n await self.disconnect()\n\n # Display error message\n log.error(\n f\"telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}\"\n )\n\n # Exception propagation\n raise\n\n # Convert data (bytes) into string\n output = byte_data.decode(\"utf-8\", \"ignore\")\n\n # Debug info message\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'\"\n )\n\n 
# Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Debug info message\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Return the result of the command\n return output\n\n async def send_config_set(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n\n # Display info message\n log.info(\"send_config_set\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Debug info message\n log.info(\"send_command\")\n\n # SSH?\n if self._protocol == \"ssh\":\n\n # Yes\n\n # Then disconnect using SSH\n output = await self.send_config_setSSH(cmds, timeout)\n\n # Telnet?\n elif self._protocol == \"telnet\":\n\n # Yes\n\n # Then disconnect using Telnet\n output = await self.send_config_setTelnet(cmds, timeout)\n\n else:\n\n # Unsupported protocol\n\n # Raise an exception\n raise Exception(f\"send_config_set: unsupported protocol: {self._protocol}\")\n\n # Return the result of the commands\n return output\n\n async def send_config_setSSH(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n\n # Display info message\n log.info(\"send_config_setSSH\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Clear returned output\n returned_output = \"\"\n\n # Check if cmds is a string\n if isinstance(cmds, str):\n\n # A string\n\n # Convert the string into a list\n cmds = [cmds]\n\n # A list?\n elif not isinstance(cmds, list):\n\n # Not a list (and not a string)\n\n # Display error message\n log.error(\n \"send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list\"\n )\n\n # Leave the method\n return returned_output\n\n ##############################\n # Entering configuration mode\n ##############################\n\n # Display info message\n log.info(\"send_config_set: entering configuration mode\")\n\n # Clear output\n output = \"\"\n\n # Get command for entering in config made\n cmd = self.cmd_enter_config_mode\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n\n # Sending command\n self.stdinx.write(cmd)\n\n # Display message\n log.info(\"send_config_setSSH: configuration mode entered\")\n\n while True:\n\n # Read the data received\n output += await asyncio.wait_for(\n self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(f\"send_config_setSSH: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n # Debug info message\n log.debug(\n f\"send_config_setSSH: raw output: '{output}'\\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setSSH: cleaned output: '{output}'\\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n ##############################\n # Sending commands\n ##############################\n\n # Display info message\n log.info(\"send_config_setSSH: sending commands\")\n\n # Clear output\n output = \"\"\n\n # Each command\n for cmd in cmds:\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n\n # Sending command\n self.stdinx.write(cmd)\n\n # Display info message\n log.info(\"send_config_setSSH: command sent\")\n\n while True:\n\n # Read the data received\n output += await asyncio.wait_for(\n self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(f\"send_config_setSSH: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n # Debug info message\n 
log.debug(\n f\"send_config_setSSH: raw output: '{output}'\\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setSSH: cleaned output: '{output}'\\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n ##############################\n # Leaving configuration mode\n ##############################\n\n # Display info message\n log.info(\"send_config_setSSH: leaving configuration mode\")\n\n # Clear output\n output = \"\"\n\n # Get command to leave config made\n cmd = self.cmd_exit_config_mode\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n\n # Sending command\n self.stdinx.write(cmd)\n\n # Display info message\n log.info(\"send_config_setSSH: command to leave configuration mode sent\")\n\n while True:\n\n # Read the data received\n output += await asyncio.wait_for(\n self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(f\"send_config_setSSH: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n # Debug info message\n log.debug(\n f\"send_config_setSSH: raw output: '{output}'\\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setSSH: cleaned output: '{output}'\\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n # Return the result of the commands\n return returned_output\n\n async def send_config_setTelnet(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n\n # Display info message\n log.info(\"send_config_setTelnet\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Clear returned output\n returned_output = \"\"\n\n # Check if cmds is a string\n if isinstance(cmds, str):\n\n # A string\n\n # Convert the string into a list\n cmds = [cmds]\n\n # A list?\n elif not isinstance(cmds, list):\n\n # Not a list (and not a string)\n\n # Display error message\n log.error(\n \"send_config_setTelnet: parameter cmds used in send_config_set is neither a string or a list\"\n )\n\n # Leave the method\n return returned_output\n\n ##############################\n # Entering configuration mode\n ##############################\n\n # Display info message\n log.info(\"send_config_setTelnet: entering configuration mode\")\n\n # Clear output\n output = \"\"\n\n # Get command for entering in config made\n cmd = self.cmd_enter_config_mode\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n\n # Sending command\n self._writer.write(cmd.encode())\n\n # Display message\n log.info(\"send_config_setTelnet: configuration mode entered\")\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n try:\n\n # Read data\n while True:\n\n # Read the data received\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Temporary convertion in string. This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n except asyncio.TimeoutError:\n\n # Time out during when reading prompt\n\n # Display error message\n log.error(\"send_config_setTelnet: connection: timeout\")\n\n # Exception propagation\n raise\n\n except Exception as error:\n\n # Error during when reading prompt\n\n # Display error message\n log.error(f\"send_config_setTelnet: error: {error}\")\n\n # Exception propagation\n raise\n\n # Convert data (bytes) into string\n output = byte_data.decode(\"utf-8\", \"ignore\")\n\n # Debug info message\n log.debug(\n f\"send_config_setTelnet: raw output: '{output}'\\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setTelnet: cleaned output: '{output}'\\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n ##############################\n # Sending commands\n ##############################\n\n # Display info message\n log.info(\"send_config_setTelnet: sending commands\")\n\n # Clear 
output\n output = \"\"\n\n # Each command\n for cmd in cmds:\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n\n # Sending command\n self._writer.write(cmd.encode())\n\n # Display info message\n log.info(\"send_config_setTelnet: command sent\")\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n try:\n\n # Read data\n while True:\n\n # Read the data received\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Temporary convertion in string. This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n except asyncio.TimeoutError:\n\n # Time out during when reading prompt\n\n # Display error message\n log.error(\"send_config_setTelnet: connection: timeout\")\n\n # Exception propagation\n raise\n\n except Exception as error:\n\n # Error during when reading prompt\n\n # Display error message\n log.error(f\"send_config_setTelnet: error: {error}\")\n\n # Exception propagation\n raise\n\n # Convert data (bytes) into string\n output = byte_data.decode(\"utf-8\", \"ignore\")\n\n # Debug info message\n log.debug(\n f\"send_config_setTelnet: raw output: '{output}'\\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setTelnet: cleaned output: '{output}'\\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n ##############################\n # Leaving configuration mode\n ##############################\n\n # Display info message\n log.info(\"send_config_setTelnet: leaving configuration mode\")\n\n # Clear output\n output = \"\"\n\n # Get command to leave config made\n cmd = self.cmd_exit_config_mode\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n\n # Sending command\n self._writer.write(cmd.encode())\n\n # Display info message\n log.info(\"send_config_setTelnet: command to leave configuration mode sent\")\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n # Protection against infinite loop\n loop = 3\n\n try:\n\n # Read data\n while loop:\n\n # Read the data received\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Temporary convertion in string. 
This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n\n await asyncio.sleep(0.5)\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n # Protection for \"exit\" command infinite loop in Cisco when enable is not activated\n loop -= 1\n\n except asyncio.TimeoutError:\n\n # Time out during when reading prompt\n\n # Display error message\n log.error(\"send_config_setTelnet: connection: timeout\")\n\n # Exception propagation\n raise\n\n except Exception as error:\n\n # Error during when reading prompt\n\n # Display error message\n log.error(f\"send_config_setTelnet: error: {error}\")\n\n # Exception propagation\n raise\n\n # Convert data (bytes) into string\n output = byte_data.decode(\"utf-8\", \"ignore\")\n\n # Debug info message\n log.debug(\n f\"send_config_setTelnet: raw output: '{output}'\\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setTelnet: cleaned output: '{output}'\\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n # Return the result of the commands\n return returned_output\n\n #########################################################\n #\n # List of API\n #\n #########################################################\n\n async def get_version(self):\n \"\"\"\n Asyn method used to get the version of the software of the device\n\n :return: Version of the software of the device\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"get_version\")\n\n # By default empty string\n version = \"\"\n\n # Run get version on the device\n output = await self.send_command(self.cmd_get_version)\n\n # Seek \"Version \" and \",\" to get the version in the returned output\n version = output.split(\"Version \")[1].split(\",\")[0]\n\n # Display info message\n log.info(f\"get_version: version: {version}\")\n\n # Return the version of the software of the device\n return version\n\n async def get_hostname(self):\n \"\"\"\n Asyn method used to get the name of the device\n\n :return: Name of the device\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"get_hostname\")\n\n # Get hostname\n output = await self.send_command(self.cmd_get_hostname)\n\n # Display info message\n log.info(f\"get_hostname: output: '{output}'\")\n\n # Remove the useless information in the returned string\n output = output.split()[0]\n\n # Display info message\n log.info(f\"get_hostname: hostname found: '{output}'\")\n\n # Return the name of the device\n return output\n\n async def get_model(self):\n \"\"\"\n Asyn method used to get the model of the device\n\n :return: Model of the device\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"get_model\")\n\n # Get model\n output = await self.send_command(self.cmd_get_model)\n\n # Display info message\n 
log.info(f\"get_model: output: '{output}'\")\n\n # Remove the useless information in the returned string\n output = output.split('\"')[3]\n\n # Display info message\n log.info(f\"get_model: model found: '{output}'\")\n\n # Return the model of the device\n return output\n\n async def get_serial_number(self):\n \"\"\"\n Get serial number of the switch or the serial number of the first switch of a stack\n\n :return: Serial number of the device\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"get_serial_number\")\n\n # Get serial number\n output = await self.send_command(self.cmd_get_serial_number)\n\n # Display info message\n log.info(f\"get_serial_number: output: '{output}'\")\n\n # Remove the useless information in the returned string\n output = output.splitlines()[0].split()[-1]\n\n # Display info message\n log.info(f\"get_hostname: hostname found: '{output}'\")\n\n # Return the serial number of the device\n return output\n\n async def get_config(self, timeout=None):\n \"\"\"\n Asyn method used to get the configuration of the device\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: Configuration of the device\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"get_config\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Get config\n output = await self.send_command(self.cmd_get_config, timeout=timeout)\n\n # Return de configuration of the device\n return output\n\n async def save_config(self):\n \"\"\"\n Asyn method used to save the current configuration on the device\n\n :return: Commands of the configuration saving process\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"save_config\")\n\n # Send command\n output = await self.send_command(self.cmd_save_config)\n\n # Return the commands of the configuration saving process\n return output\n",
"step-ids": [
9,
10,
12,
14,
15
]
}
<|reserved_special_token_0|>
class AirflowSecurityManager(SecurityManagerOverride, SecurityManager,
LoggingMixin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},
{'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {
'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +
OP_PERMISSIONS + ADMIN_PERMISSIONS}]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _get_root_dag_id(self, dag_id: str) ->str:
if '.' in dag_id:
dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,
DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()
return dm.root_dag_id or dm.dag_id
return dag_id
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
return user.roles
def get_readable_dags(self, user) ->Iterable[DagModel]:
"""Gets the DAGs readable by authenticated user."""
warnings.warn(
'`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_READ], user
)
def get_editable_dags(self, user) ->Iterable[DagModel]:
"""Gets the DAGs editable by authenticated user."""
warnings.warn(
'`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user
)
<|reserved_special_token_0|>
def get_readable_dag_ids(self, user) ->set[str]:
"""Gets the DAG IDs readable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])
def get_editable_dag_ids(self, user) ->set[str]:
"""Gets the DAG IDs editable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])
@provide_session
def get_accessible_dag_ids(self, user, user_actions: (Container[str] |
None)=None, session: Session=NEW_SESSION) ->set[str]:
"""Generic function to get readable or writable DAGs for user."""
if not user_actions:
user_actions = [permissions.ACTION_CAN_EDIT, permissions.
ACTION_CAN_READ]
if not get_auth_manager().is_logged_in():
roles = user.roles
else:
if (permissions.ACTION_CAN_EDIT in user_actions and self.
can_edit_all_dags(user) or permissions.ACTION_CAN_READ in
user_actions and self.can_read_all_dags(user)):
return {dag.dag_id for dag in session.execute(select(
DagModel.dag_id))}
user_query = session.scalar(select(User).options(joinedload(
User.roles).subqueryload(Role.permissions).options(
joinedload(Permission.action), joinedload(Permission.
resource))).where(User.id == user.id))
roles = user_query.roles
resources = set()
for role in roles:
for permission in role.permissions:
action = permission.action.name
if action not in user_actions:
continue
resource = permission.resource.name
if resource == permissions.RESOURCE_DAG:
return {dag.dag_id for dag in session.execute(select(
DagModel.dag_id))}
if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
resources.add(resource[len(permissions.
RESOURCE_DAG_PREFIX):])
else:
resources.add(resource)
return {dag.dag_id for dag in session.execute(select(DagModel.
dag_id).where(DagModel.dag_id.in_(resources)))}
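    # Hypothetical usage sketch (illustrative, not part of the original class):
    # combining the accessors above to gate a view for a FAB user object.
    # ``get_accessible_dag_ids`` strips the ``DAG:`` prefix, so the returned
    # set contains bare dag_ids:
    #
    #     readable = security_manager.get_readable_dag_ids(user)
    #     if "example_dag" in readable:
    #         ...  # render the DAG page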
def can_access_some_dags(self, action: str, dag_id: (str | None)=None
) ->bool:
"""Checks if user has read or write access to some dags."""
if dag_id and dag_id != '~':
root_dag_id = self._get_root_dag_id(dag_id)
return self.has_access(action, permissions.
resource_name_for_dag(root_dag_id))
user = g.user
if action == permissions.ACTION_CAN_READ:
return any(self.get_readable_dag_ids(user))
return any(self.get_editable_dag_ids(user))
def can_read_dag(self, dag_id: str, user=None) ->bool:
"""Determines whether a user has DAG read access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_READ,
dag_resource_name, user=user)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def prefixed_dag_id(self, dag_id: str) ->str:
"""Returns the permission name for a DAG id."""
warnings.warn(
'`prefixed_dag_id` has been deprecated. Please use `airflow.security.permissions.resource_name_for_dag` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
root_dag_id = self._get_root_dag_id(dag_id)
return permissions.resource_name_for_dag(root_dag_id)
def is_dag_resource(self, resource_name: str) ->bool:
"""Determines if a resource belongs to a DAG or all DAGs."""
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
def has_access(self, action_name: str, resource_name: str, user=None
) ->bool:
"""
Verify whether a given user could perform a certain action on the given resource.
Example actions might include can_read, can_write, can_delete, etc.
        :param action_name: action_name on resource (e.g. can_read, can_edit).
        :param resource_name: name of view-menu or resource.
        :param user: user object; defaults to the current request user (g.user).
        :return: Whether the user could perform the action on the resource.
        :rtype: bool
"""
if not user:
user = g.user
if (action_name, resource_name) in user.perms:
return True
if self.is_dag_resource(resource_name):
if (action_name, permissions.RESOURCE_DAG) in user.perms:
return True
return (action_name, resource_name) in user.perms
return False
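    # Hypothetical sketch of the (action, resource) tuple semantics that
    # ``has_access`` checks against ``user.perms`` (``sm`` and the resource
    # names are illustrative):
    #
    #     user.perms = {("can_read", "DAG:example_dag")}
    #     sm.has_access("can_read", "DAG:example_dag", user)  # True: direct match
    #     user.perms = {("can_read", permissions.RESOURCE_DAG)}
    #     sm.has_access("can_read", "DAG:example_dag", user)  # True: the global
    #     # DAG permission covers every per-DAG resource.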
def _has_role(self, role_name_or_list: Container, user) ->bool:
"""Whether the user has this role name."""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(r.name in role_name_or_list for r in user.roles)
def has_all_dags_access(self, user) ->bool:
"""
        Has access to all DAGs if any of the 3 cases holds.
1. Role needs to be in (Admin, Viewer, User, Op).
2. Has can_read action on dags resource.
3. Has can_edit action on dags resource.
"""
if not user:
user = g.user
return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user
) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)
def can_edit_all_dags(self, user=None) ->bool:
"""Has can_edit action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_EDIT, permissions.
RESOURCE_DAG, user)
<|reserved_special_token_0|>
def clean_perms(self) ->None:
"""FAB leaves faulty permissions that need to be cleaned up."""
self.log.debug('Cleaning faulty perms')
sesh = self.appbuilder.get_session
perms = sesh.query(Permission).filter(or_(Permission.action == None,
Permission.resource == None))
deleted_count = 0
for perm in perms:
sesh.delete(perm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info('Deleted %s faulty permissions', deleted_count)
<|reserved_special_token_0|>
def add_homepage_access_to_custom_roles(self) ->None:
"""
Add Website.can_read access to all custom roles.
:return: None.
"""
website_permission = self.create_permission(permissions.
ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
custom_roles = [role for role in self.get_all_roles() if role.name
not in EXISTING_ROLES]
for role in custom_roles:
self.add_permission_to_role(role, website_permission)
self.appbuilder.get_session.commit()
def get_all_permissions(self) ->set[tuple[str, str]]:
"""Returns all permissions as a set of tuples with the action and resource names."""
return set(self.appbuilder.get_session.execute(select(self.
action_model.name, self.resource_model.name).join(self.
permission_model.action).join(self.permission_model.resource)))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def create_dag_specific_permissions(self) ->None:
"""
Add permissions to all DAGs.
Creates 'can_read', 'can_edit', and 'can_delete' permissions for all
DAGs, along with any `access_control` permissions provided in them.
This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`
if you only need to sync a single DAG.
:return: None.
"""
perms = self.get_all_permissions()
dagbag = DagBag(read_dags_from_db=True)
dagbag.collect_dags_from_db()
dags = dagbag.dags.values()
for dag in dags:
root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag
.dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
for action_name in self.DAG_ACTIONS:
if (action_name, dag_resource_name) not in perms:
self._merge_perm(action_name, dag_resource_name)
if dag.access_control:
self.sync_perm_for_dag(dag_resource_name, dag.access_control)
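    # Note (added): per-DAG resources are derived with
    # permissions.resource_name_for_dag, e.g. a dag_id of "example_dag" maps
    # to the resource "DAG:example_dag" (prefix shown for illustration).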
def update_admin_permission(self) ->None:
"""
Add missing permissions to the table for admin.
        Admin should get all permissions, except the per-DAG permissions,
        because Admin already has the global DAGs permission.
Add the missing ones to the table for admin.
:return: None.
"""
session = self.appbuilder.get_session
dag_resources = session.scalars(select(Resource).where(Resource.
name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))
resource_ids = [resource.id for resource in dag_resources]
perms = session.scalars(select(Permission).where(~Permission.
resource_id.in_(resource_ids)))
perms = [p for p in perms if p.action and p.resource]
admin = self.find_role('Admin')
admin.permissions = list(set(admin.permissions) | set(perms))
session.commit()
def sync_roles(self) ->None:
"""
Initialize default and custom roles with related permissions.
        1. Init the default roles (Admin, Viewer, User, Op, Public)
        with related permissions.
        2. Init the custom role (dag-user) with related permissions.
:return: None.
"""
self.create_perm_vm_for_all_dag()
self.bulk_sync_roles(self.ROLE_CONFIGS)
self.add_homepage_access_to_custom_roles()
self.update_admin_permission()
self.clean_perms()
def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |
None)=None) ->None:
"""Populates resource-based permissions."""
if not perms:
return
for action_name, resource_name in perms:
self.create_resource(resource_name)
self.create_permission(action_name, resource_name)
def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,
Collection[str]] | None)=None) ->None:
"""
Sync permissions for given dag id.
        The dag id is guaranteed to exist in the dag bag, as only the refresh button (the /refresh endpoint) or DagBag will call this function.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
            each value is a set() of action names (e.g., {'can_read'})
:return:
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
for dag_action_name in self.DAG_ACTIONS:
self.create_permission(dag_action_name, dag_resource_name)
if access_control is not None:
self.log.info("Syncing DAG-level permissions for DAG '%s'",
dag_resource_name)
self._sync_dag_view_permissions(dag_resource_name, access_control)
else:
self.log.info(
"Not syncing DAG-level permissions for DAG '%s' as access control is unset."
, dag_resource_name)
def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[
str, Collection[str]]) ->None:
"""
Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g. {'can_read'})
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
def _get_or_create_dag_permission(action_name: str) ->(Permission |
None):
perm = self.get_permission(action_name, dag_resource_name)
if not perm:
self.log.info("Creating new action '%s' on resource '%s'",
action_name, dag_resource_name)
perm = self.create_permission(action_name, dag_resource_name)
return perm
def _revoke_stale_permissions(resource: Resource):
existing_dag_perms = self.get_resource_permissions(resource)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role if role.name !=
'Admin']
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, ())
if perm.action.name not in target_perms_for_role:
self.log.info("Revoking '%s' on DAG '%s' for role '%s'"
, perm.action, dag_resource_name, role.name)
self.remove_permission_from_role(role, perm)
resource = self.get_resource(dag_resource_name)
if resource:
_revoke_stale_permissions(resource)
for rolename, action_names in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
f"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist"
)
action_names = set(action_names)
invalid_action_names = action_names - self.DAG_ACTIONS
if invalid_action_names:
raise AirflowException(
f"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}"
)
for action_name in action_names:
dag_perm = _get_or_create_dag_permission(action_name)
if dag_perm:
self.add_permission_to_role(role, dag_perm)
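    # Hypothetical shape of the ``access_control`` mapping consumed above
    # (role names are illustrative; ``sm`` is a security manager instance):
    #
    #     access_control = {
    #         "data-team": {"can_read"},
    #         "platform-team": {"can_read", "can_edit"},
    #     }
    #     sm._sync_dag_view_permissions("example_dag", access_control)
    #
    # Roles omitted from the mapping have their stale per-DAG permissions
    # revoked, except for Admin.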
def create_perm_vm_for_all_dag(self) ->None:
"""Create perm-vm if not exist and insert into FAB security model for all-dags."""
for resource_name in self.DAG_RESOURCES:
for action_name in self.DAG_ACTIONS:
self._merge_perm(action_name, resource_name)
def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)
=None, dag_id: (str | None)=None) ->bool:
"""Checks that the logged in user has the specified permissions."""
if not perms:
return True
for perm in perms:
if perm in ((permissions.ACTION_CAN_READ, permissions.
RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.
RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.
RESOURCE_DAG)):
can_access_all_dags = self.has_access(*perm)
if can_access_all_dags:
continue
action = perm[0]
if self.can_access_some_dags(action, dag_id):
continue
return False
elif not self.has_access(*perm):
return False
return True
class FakeAppBuilder:
"""Stand-in class to replace a Flask App Builder.
The only purpose is to provide the ``self.appbuilder.get_session`` interface
for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask
app, which is slow to create.
"""
def __init__(self, session: (Session | None)=None) ->None:
self.get_session = session
class ApplessAirflowSecurityManager(AirflowSecurityManager):
"""Security Manager that doesn't need the whole flask app."""
def __init__(self, session: (Session | None)=None):
self.appbuilder = FakeAppBuilder(session)
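# Hypothetical usage sketch: ``ApplessAirflowSecurityManager`` only needs a
# SQLAlchemy session, which makes it useful in CLI or test code where creating
# a full Flask app would be slow. Assuming an existing ``session`` and a FAB
# ``user`` object:
#
#     sm = ApplessAirflowSecurityManager(session=session)
#     dag_resource = permissions.resource_name_for_dag("example_dag")
#     sm.has_access(permissions.ACTION_CAN_READ, dag_resource, user=user)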
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AirflowSecurityManager(SecurityManagerOverride, SecurityManager,
LoggingMixin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},
{'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {
'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +
OP_PERMISSIONS + ADMIN_PERMISSIONS}]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _get_root_dag_id(self, dag_id: str) ->str:
if '.' in dag_id:
dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,
DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()
return dm.root_dag_id or dm.dag_id
return dag_id
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
return user.roles
def get_readable_dags(self, user) ->Iterable[DagModel]:
"""Gets the DAGs readable by authenticated user."""
warnings.warn(
'`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_READ], user
)
def get_editable_dags(self, user) ->Iterable[DagModel]:
"""Gets the DAGs editable by authenticated user."""
warnings.warn(
'`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user
)
<|reserved_special_token_0|>
def get_readable_dag_ids(self, user) ->set[str]:
"""Gets the DAG IDs readable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])
def get_editable_dag_ids(self, user) ->set[str]:
"""Gets the DAG IDs editable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])
@provide_session
def get_accessible_dag_ids(self, user, user_actions: (Container[str] |
None)=None, session: Session=NEW_SESSION) ->set[str]:
"""Generic function to get readable or writable DAGs for user."""
if not user_actions:
user_actions = [permissions.ACTION_CAN_EDIT, permissions.
ACTION_CAN_READ]
if not get_auth_manager().is_logged_in():
roles = user.roles
else:
if (permissions.ACTION_CAN_EDIT in user_actions and self.
can_edit_all_dags(user) or permissions.ACTION_CAN_READ in
user_actions and self.can_read_all_dags(user)):
return {dag.dag_id for dag in session.execute(select(
DagModel.dag_id))}
user_query = session.scalar(select(User).options(joinedload(
User.roles).subqueryload(Role.permissions).options(
joinedload(Permission.action), joinedload(Permission.
resource))).where(User.id == user.id))
roles = user_query.roles
resources = set()
for role in roles:
for permission in role.permissions:
action = permission.action.name
if action not in user_actions:
continue
resource = permission.resource.name
if resource == permissions.RESOURCE_DAG:
return {dag.dag_id for dag in session.execute(select(
DagModel.dag_id))}
if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
resources.add(resource[len(permissions.
RESOURCE_DAG_PREFIX):])
else:
resources.add(resource)
return {dag.dag_id for dag in session.execute(select(DagModel.
dag_id).where(DagModel.dag_id.in_(resources)))}
def can_access_some_dags(self, action: str, dag_id: (str | None)=None
) ->bool:
"""Checks if user has read or write access to some dags."""
if dag_id and dag_id != '~':
root_dag_id = self._get_root_dag_id(dag_id)
return self.has_access(action, permissions.
resource_name_for_dag(root_dag_id))
user = g.user
if action == permissions.ACTION_CAN_READ:
return any(self.get_readable_dag_ids(user))
return any(self.get_editable_dag_ids(user))
def can_read_dag(self, dag_id: str, user=None) ->bool:
"""Determines whether a user has DAG read access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_READ,
dag_resource_name, user=user)
<|reserved_special_token_0|>
def can_delete_dag(self, dag_id: str, user=None) ->bool:
"""Determines whether a user has DAG delete access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_DELETE,
dag_resource_name, user=user)
def prefixed_dag_id(self, dag_id: str) ->str:
"""Returns the permission name for a DAG id."""
warnings.warn(
'`prefixed_dag_id` has been deprecated. Please use `airflow.security.permissions.resource_name_for_dag` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
root_dag_id = self._get_root_dag_id(dag_id)
return permissions.resource_name_for_dag(root_dag_id)
def is_dag_resource(self, resource_name: str) ->bool:
"""Determines if a resource belongs to a DAG or all DAGs."""
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
def has_access(self, action_name: str, resource_name: str, user=None
) ->bool:
"""
Verify whether a given user could perform a certain action on the given resource.
Example actions might include can_read, can_write, can_delete, etc.
        :param action_name: action_name on resource (e.g. can_read, can_edit).
        :param resource_name: name of view-menu or resource.
        :param user: user object; defaults to the current request user (g.user).
        :return: Whether the user could perform the action on the resource.
        :rtype: bool
"""
if not user:
user = g.user
if (action_name, resource_name) in user.perms:
return True
if self.is_dag_resource(resource_name):
if (action_name, permissions.RESOURCE_DAG) in user.perms:
return True
return (action_name, resource_name) in user.perms
return False
def _has_role(self, role_name_or_list: Container, user) ->bool:
"""Whether the user has this role name."""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(r.name in role_name_or_list for r in user.roles)
def has_all_dags_access(self, user) ->bool:
"""
        Has access to all DAGs if any of the 3 cases holds.
1. Role needs to be in (Admin, Viewer, User, Op).
2. Has can_read action on dags resource.
3. Has can_edit action on dags resource.
"""
if not user:
user = g.user
return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user
) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)
def can_edit_all_dags(self, user=None) ->bool:
"""Has can_edit action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_EDIT, permissions.
RESOURCE_DAG, user)
def can_read_all_dags(self, user=None) ->bool:
"""Has can_read action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_READ, permissions.
RESOURCE_DAG, user)
def clean_perms(self) ->None:
"""FAB leaves faulty permissions that need to be cleaned up."""
self.log.debug('Cleaning faulty perms')
sesh = self.appbuilder.get_session
perms = sesh.query(Permission).filter(or_(Permission.action == None,
Permission.resource == None))
deleted_count = 0
for perm in perms:
sesh.delete(perm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info('Deleted %s faulty permissions', deleted_count)
<|reserved_special_token_0|>
def add_homepage_access_to_custom_roles(self) ->None:
"""
Add Website.can_read access to all custom roles.
:return: None.
"""
website_permission = self.create_permission(permissions.
ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
custom_roles = [role for role in self.get_all_roles() if role.name
not in EXISTING_ROLES]
for role in custom_roles:
self.add_permission_to_role(role, website_permission)
self.appbuilder.get_session.commit()
def get_all_permissions(self) ->set[tuple[str, str]]:
"""Returns all permissions as a set of tuples with the action and resource names."""
return set(self.appbuilder.get_session.execute(select(self.
action_model.name, self.resource_model.name).join(self.
permission_model.action).join(self.permission_model.resource)))
<|reserved_special_token_0|>
def _get_all_roles_with_permissions(self) ->dict[str, Role]:
"""Returns a dict with a key of role name and value of role with early loaded permissions."""
return {r.name: r for r in self.appbuilder.get_session.scalars(
select(self.role_model).options(joinedload(self.role_model.
permissions))).unique()}
def create_dag_specific_permissions(self) ->None:
"""
Add permissions to all DAGs.
Creates 'can_read', 'can_edit', and 'can_delete' permissions for all
DAGs, along with any `access_control` permissions provided in them.
This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`
if you only need to sync a single DAG.
:return: None.
"""
perms = self.get_all_permissions()
dagbag = DagBag(read_dags_from_db=True)
dagbag.collect_dags_from_db()
dags = dagbag.dags.values()
for dag in dags:
root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag
.dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
for action_name in self.DAG_ACTIONS:
if (action_name, dag_resource_name) not in perms:
self._merge_perm(action_name, dag_resource_name)
if dag.access_control:
self.sync_perm_for_dag(dag_resource_name, dag.access_control)
def update_admin_permission(self) ->None:
"""
Add missing permissions to the table for admin.
        Admin should get all permissions, except the per-DAG permissions,
        because Admin already has the global DAGs permission.
Add the missing ones to the table for admin.
:return: None.
"""
session = self.appbuilder.get_session
dag_resources = session.scalars(select(Resource).where(Resource.
name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))
resource_ids = [resource.id for resource in dag_resources]
perms = session.scalars(select(Permission).where(~Permission.
resource_id.in_(resource_ids)))
perms = [p for p in perms if p.action and p.resource]
admin = self.find_role('Admin')
admin.permissions = list(set(admin.permissions) | set(perms))
session.commit()
def sync_roles(self) ->None:
"""
Initialize default and custom roles with related permissions.
        1. Init the default roles (Admin, Viewer, User, Op, Public)
        with related permissions.
        2. Init the custom role (dag-user) with related permissions.
:return: None.
"""
self.create_perm_vm_for_all_dag()
self.bulk_sync_roles(self.ROLE_CONFIGS)
self.add_homepage_access_to_custom_roles()
self.update_admin_permission()
self.clean_perms()
def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |
None)=None) ->None:
"""Populates resource-based permissions."""
if not perms:
return
for action_name, resource_name in perms:
self.create_resource(resource_name)
self.create_permission(action_name, resource_name)
def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,
Collection[str]] | None)=None) ->None:
"""
Sync permissions for given dag id.
        The dag id is guaranteed to exist in the dag bag, as only the refresh button (the /refresh endpoint) or DagBag will call this function.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
            each value is a set() of action names (e.g., {'can_read'})
:return:
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
for dag_action_name in self.DAG_ACTIONS:
self.create_permission(dag_action_name, dag_resource_name)
if access_control is not None:
self.log.info("Syncing DAG-level permissions for DAG '%s'",
dag_resource_name)
self._sync_dag_view_permissions(dag_resource_name, access_control)
else:
self.log.info(
"Not syncing DAG-level permissions for DAG '%s' as access control is unset."
, dag_resource_name)
def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[
str, Collection[str]]) ->None:
"""
Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g. {'can_read'})
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
def _get_or_create_dag_permission(action_name: str) ->(Permission |
None):
perm = self.get_permission(action_name, dag_resource_name)
if not perm:
self.log.info("Creating new action '%s' on resource '%s'",
action_name, dag_resource_name)
perm = self.create_permission(action_name, dag_resource_name)
return perm
def _revoke_stale_permissions(resource: Resource):
existing_dag_perms = self.get_resource_permissions(resource)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role if role.name !=
'Admin']
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, ())
if perm.action.name not in target_perms_for_role:
self.log.info("Revoking '%s' on DAG '%s' for role '%s'"
, perm.action, dag_resource_name, role.name)
self.remove_permission_from_role(role, perm)
resource = self.get_resource(dag_resource_name)
if resource:
_revoke_stale_permissions(resource)
for rolename, action_names in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
f"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist"
)
action_names = set(action_names)
invalid_action_names = action_names - self.DAG_ACTIONS
if invalid_action_names:
raise AirflowException(
f"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}"
)
for action_name in action_names:
dag_perm = _get_or_create_dag_permission(action_name)
if dag_perm:
self.add_permission_to_role(role, dag_perm)
def create_perm_vm_for_all_dag(self) ->None:
"""Create perm-vm if not exist and insert into FAB security model for all-dags."""
for resource_name in self.DAG_RESOURCES:
for action_name in self.DAG_ACTIONS:
self._merge_perm(action_name, resource_name)
def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)
=None, dag_id: (str | None)=None) ->bool:
"""Checks that the logged in user has the specified permissions."""
if not perms:
return True
for perm in perms:
if perm in ((permissions.ACTION_CAN_READ, permissions.
RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.
RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.
RESOURCE_DAG)):
can_access_all_dags = self.has_access(*perm)
if can_access_all_dags:
continue
action = perm[0]
if self.can_access_some_dags(action, dag_id):
continue
return False
elif not self.has_access(*perm):
return False
return True
class FakeAppBuilder:
"""Stand-in class to replace a Flask App Builder.
The only purpose is to provide the ``self.appbuilder.get_session`` interface
for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask
app, which is slow to create.
"""
def __init__(self, session: (Session | None)=None) ->None:
self.get_session = session
class ApplessAirflowSecurityManager(AirflowSecurityManager):
"""Security Manager that doesn't need the whole flask app."""
def __init__(self, session: (Session | None)=None):
self.appbuilder = FakeAppBuilder(session)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AirflowSecurityManager(SecurityManagerOverride, SecurityManager,
LoggingMixin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},
{'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {
'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +
OP_PERMISSIONS + ADMIN_PERMISSIONS}]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, appbuilder) ->None:
super().__init__(appbuilder=appbuilder, actionmodelview=self.
actionmodelview, authdbview=self.authdbview, authldapview=self.
authldapview, authoauthview=self.authoauthview, authoidview=
self.authoidview, authremoteuserview=self.authremoteuserview,
permissionmodelview=self.permissionmodelview, registeruser_view
=self.registeruser_view, registeruserdbview=self.
registeruserdbview, registeruseroauthview=self.
registeruseroauthview, registerusermodelview=self.
registerusermodelview, registeruseroidview=self.
registeruseroidview, resetmypasswordview=self.
resetmypasswordview, resetpasswordview=self.resetpasswordview,
rolemodelview=self.rolemodelview, user_model=self.user_model,
userinfoeditview=self.userinfoeditview, userdbmodelview=self.
userdbmodelview, userldapmodelview=self.userldapmodelview,
useroauthmodelview=self.useroauthmodelview, useroidmodelview=
self.useroidmodelview, userremoteusermodelview=self.
userremoteusermodelview, userstatschartview=self.userstatschartview
)
for attr in dir(self):
if not attr.endswith('view'):
continue
view = getattr(self, attr, None)
if not view or not getattr(view, 'datamodel', None):
continue
view.datamodel = CustomSQLAInterface(view.datamodel.obj)
self.perms = None
def _get_root_dag_id(self, dag_id: str) ->str:
if '.' in dag_id:
dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,
DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()
return dm.root_dag_id or dm.dag_id
return dag_id
<|reserved_special_token_0|>
def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) ->None:
"""Sync the provided roles and permissions."""
existing_roles = self._get_all_roles_with_permissions()
non_dag_perms = self._get_all_non_dag_permissions()
for config in roles:
role_name = config['role']
perms = config['perms']
role = existing_roles.get(role_name) or self.add_role(role_name)
for action_name, resource_name in perms:
perm = non_dag_perms.get((action_name, resource_name)
) or self.create_permission(action_name, resource_name)
if perm not in role.permissions:
self.add_permission_to_role(role, perm)
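    # Hypothetical input shape for ``bulk_sync_roles`` (mirrors ROLE_CONFIGS;
    # the role name is illustrative):
    #
    #     security_manager.bulk_sync_roles([
    #         {"role": "ReadOnly",
    #          "perms": [(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)]},
    #     ])
    #
    # Existing roles are reused; missing permissions are created and attached.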
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
return user.roles
def get_readable_dags(self, user) ->Iterable[DagModel]:
"""Gets the DAGs readable by authenticated user."""
warnings.warn(
'`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_READ], user
)
def get_editable_dags(self, user) ->Iterable[DagModel]:
"""Gets the DAGs editable by authenticated user."""
warnings.warn(
'`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user
)
<|reserved_special_token_0|>
def get_readable_dag_ids(self, user) ->set[str]:
"""Gets the DAG IDs readable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])
def get_editable_dag_ids(self, user) ->set[str]:
"""Gets the DAG IDs editable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])
@provide_session
def get_accessible_dag_ids(self, user, user_actions: (Container[str] |
None)=None, session: Session=NEW_SESSION) ->set[str]:
"""Generic function to get readable or writable DAGs for user."""
if not user_actions:
user_actions = [permissions.ACTION_CAN_EDIT, permissions.
ACTION_CAN_READ]
if not get_auth_manager().is_logged_in():
roles = user.roles
else:
if (permissions.ACTION_CAN_EDIT in user_actions and self.
can_edit_all_dags(user) or permissions.ACTION_CAN_READ in
user_actions and self.can_read_all_dags(user)):
return {dag.dag_id for dag in session.execute(select(
DagModel.dag_id))}
user_query = session.scalar(select(User).options(joinedload(
User.roles).subqueryload(Role.permissions).options(
joinedload(Permission.action), joinedload(Permission.
resource))).where(User.id == user.id))
roles = user_query.roles
resources = set()
for role in roles:
for permission in role.permissions:
action = permission.action.name
if action not in user_actions:
continue
resource = permission.resource.name
if resource == permissions.RESOURCE_DAG:
return {dag.dag_id for dag in session.execute(select(
DagModel.dag_id))}
if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
resources.add(resource[len(permissions.
RESOURCE_DAG_PREFIX):])
else:
resources.add(resource)
return {dag.dag_id for dag in session.execute(select(DagModel.
dag_id).where(DagModel.dag_id.in_(resources)))}
def can_access_some_dags(self, action: str, dag_id: (str | None)=None
) ->bool:
"""Checks if user has read or write access to some dags."""
if dag_id and dag_id != '~':
root_dag_id = self._get_root_dag_id(dag_id)
return self.has_access(action, permissions.
resource_name_for_dag(root_dag_id))
user = g.user
if action == permissions.ACTION_CAN_READ:
return any(self.get_readable_dag_ids(user))
return any(self.get_editable_dag_ids(user))
def can_read_dag(self, dag_id: str, user=None) ->bool:
"""Determines whether a user has DAG read access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_READ,
dag_resource_name, user=user)
<|reserved_special_token_0|>
def can_delete_dag(self, dag_id: str, user=None) ->bool:
"""Determines whether a user has DAG delete access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_DELETE,
dag_resource_name, user=user)
def prefixed_dag_id(self, dag_id: str) ->str:
"""Returns the permission name for a DAG id."""
warnings.warn(
'`prefixed_dag_id` has been deprecated. Please use `airflow.security.permissions.resource_name_for_dag` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
root_dag_id = self._get_root_dag_id(dag_id)
return permissions.resource_name_for_dag(root_dag_id)
def is_dag_resource(self, resource_name: str) ->bool:
"""Determines if a resource belongs to a DAG or all DAGs."""
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
def has_access(self, action_name: str, resource_name: str, user=None
) ->bool:
"""
Verify whether a given user could perform a certain action on the given resource.
Example actions might include can_read, can_write, can_delete, etc.
        :param action_name: action_name on resource (e.g. can_read, can_edit).
        :param resource_name: name of view-menu or resource.
        :param user: user object; defaults to the current request user (g.user).
        :return: Whether the user could perform the action on the resource.
        :rtype: bool
"""
if not user:
user = g.user
if (action_name, resource_name) in user.perms:
return True
if self.is_dag_resource(resource_name):
if (action_name, permissions.RESOURCE_DAG) in user.perms:
return True
return (action_name, resource_name) in user.perms
return False
def _has_role(self, role_name_or_list: Container, user) ->bool:
"""Whether the user has this role name."""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(r.name in role_name_or_list for r in user.roles)
def has_all_dags_access(self, user) ->bool:
"""
        Has access to all DAGs if any of the 3 cases holds.
1. Role needs to be in (Admin, Viewer, User, Op).
2. Has can_read action on dags resource.
3. Has can_edit action on dags resource.
"""
if not user:
user = g.user
return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user
) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)
def can_edit_all_dags(self, user=None) ->bool:
"""Has can_edit action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_EDIT, permissions.
RESOURCE_DAG, user)
def can_read_all_dags(self, user=None) ->bool:
"""Has can_read action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_READ, permissions.
RESOURCE_DAG, user)
def clean_perms(self) ->None:
"""FAB leaves faulty permissions that need to be cleaned up."""
self.log.debug('Cleaning faulty perms')
sesh = self.appbuilder.get_session
perms = sesh.query(Permission).filter(or_(Permission.action == None,
Permission.resource == None))
deleted_count = 0
for perm in perms:
sesh.delete(perm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info('Deleted %s faulty permissions', deleted_count)
def _merge_perm(self, action_name: str, resource_name: str) ->None:
"""
Add the new (action, resource) to assoc_permission_role if it doesn't exist.
        It will also add the related entries to the two meta tables, ab_permission and ab_resource.
:param action_name: Name of the action
:param resource_name: Name of the resource
:return:
"""
action = self.get_action(action_name)
resource = self.get_resource(resource_name)
perm = None
if action and resource:
perm = self.appbuilder.get_session.scalar(select(self.
permission_model).filter_by(action=action, resource=
resource).limit(1))
if not perm and action_name and resource_name:
self.create_permission(action_name, resource_name)
def add_homepage_access_to_custom_roles(self) ->None:
"""
Add Website.can_read access to all custom roles.
:return: None.
"""
website_permission = self.create_permission(permissions.
ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
custom_roles = [role for role in self.get_all_roles() if role.name
not in EXISTING_ROLES]
for role in custom_roles:
self.add_permission_to_role(role, website_permission)
self.appbuilder.get_session.commit()
def get_all_permissions(self) ->set[tuple[str, str]]:
"""Returns all permissions as a set of tuples with the action and resource names."""
return set(self.appbuilder.get_session.execute(select(self.
action_model.name, self.resource_model.name).join(self.
permission_model.action).join(self.permission_model.resource)))
def _get_all_non_dag_permissions(self) ->dict[tuple[str, str], Permission]:
"""
Get permissions except those that are for specific DAGs.
Returns a dict with a key of (action_name, resource_name) and value of permission
with all permissions except those that are for specific DAGs.
"""
return {(action_name, resource_name): viewmodel for action_name,
resource_name, viewmodel in self.appbuilder.get_session.execute
(select(self.action_model.name, self.resource_model.name, self.
permission_model).join(self.permission_model.action).join(self.
permission_model.resource).where(~self.resource_model.name.like
(f'{permissions.RESOURCE_DAG_PREFIX}%')))}
def _get_all_roles_with_permissions(self) ->dict[str, Role]:
"""Returns a dict with a key of role name and value of role with early loaded permissions."""
return {r.name: r for r in self.appbuilder.get_session.scalars(
select(self.role_model).options(joinedload(self.role_model.
permissions))).unique()}
def create_dag_specific_permissions(self) ->None:
"""
Add permissions to all DAGs.
Creates 'can_read', 'can_edit', and 'can_delete' permissions for all
DAGs, along with any `access_control` permissions provided in them.
This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`
if you only need to sync a single DAG.
:return: None.
"""
perms = self.get_all_permissions()
dagbag = DagBag(read_dags_from_db=True)
dagbag.collect_dags_from_db()
dags = dagbag.dags.values()
for dag in dags:
root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag
.dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
for action_name in self.DAG_ACTIONS:
if (action_name, dag_resource_name) not in perms:
self._merge_perm(action_name, dag_resource_name)
if dag.access_control:
self.sync_perm_for_dag(dag_resource_name, dag.access_control)
def update_admin_permission(self) ->None:
"""
Add missing permissions to the table for admin.
Admin should get all the permissions, except the dag permissions
because Admin already has Dags permission.
Add the missing ones to the table for admin.
:return: None.
"""
session = self.appbuilder.get_session
dag_resources = session.scalars(select(Resource).where(Resource.
name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))
resource_ids = [resource.id for resource in dag_resources]
perms = session.scalars(select(Permission).where(~Permission.
resource_id.in_(resource_ids)))
perms = [p for p in perms if p.action and p.resource]
admin = self.find_role('Admin')
admin.permissions = list(set(admin.permissions) | set(perms))
session.commit()
def sync_roles(self) ->None:
"""
Initialize default and custom roles with related permissions.
1. Init the default role(Admin, Viewer, User, Op, public)
with related permissions.
2. Init the custom role(dag-user) with related permissions.
:return: None.
"""
self.create_perm_vm_for_all_dag()
self.bulk_sync_roles(self.ROLE_CONFIGS)
self.add_homepage_access_to_custom_roles()
self.update_admin_permission()
self.clean_perms()
def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |
None)=None) ->None:
"""Populates resource-based permissions."""
if not perms:
return
for action_name, resource_name in perms:
self.create_resource(resource_name)
self.create_permission(action_name, resource_name)
def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,
Collection[str]] | None)=None) ->None:
"""
Sync permissions for given dag id.
The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g.,
{'can_read'}
:return:
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
for dag_action_name in self.DAG_ACTIONS:
self.create_permission(dag_action_name, dag_resource_name)
if access_control is not None:
self.log.info("Syncing DAG-level permissions for DAG '%s'",
dag_resource_name)
self._sync_dag_view_permissions(dag_resource_name, access_control)
else:
self.log.info(
"Not syncing DAG-level permissions for DAG '%s' as access control is unset."
, dag_resource_name)
def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[
str, Collection[str]]) ->None:
"""
Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g. {'can_read'})
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
def _get_or_create_dag_permission(action_name: str) ->(Permission |
None):
perm = self.get_permission(action_name, dag_resource_name)
if not perm:
self.log.info("Creating new action '%s' on resource '%s'",
action_name, dag_resource_name)
perm = self.create_permission(action_name, dag_resource_name)
return perm
def _revoke_stale_permissions(resource: Resource):
existing_dag_perms = self.get_resource_permissions(resource)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role if role.name !=
'Admin']
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, ())
if perm.action.name not in target_perms_for_role:
self.log.info("Revoking '%s' on DAG '%s' for role '%s'"
, perm.action, dag_resource_name, role.name)
self.remove_permission_from_role(role, perm)
resource = self.get_resource(dag_resource_name)
if resource:
_revoke_stale_permissions(resource)
for rolename, action_names in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
f"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist"
)
action_names = set(action_names)
invalid_action_names = action_names - self.DAG_ACTIONS
if invalid_action_names:
raise AirflowException(
f"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}"
)
for action_name in action_names:
dag_perm = _get_or_create_dag_permission(action_name)
if dag_perm:
self.add_permission_to_role(role, dag_perm)
def create_perm_vm_for_all_dag(self) ->None:
"""Create perm-vm if not exist and insert into FAB security model for all-dags."""
for resource_name in self.DAG_RESOURCES:
for action_name in self.DAG_ACTIONS:
self._merge_perm(action_name, resource_name)
def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)
=None, dag_id: (str | None)=None) ->bool:
"""Checks that the logged in user has the specified permissions."""
if not perms:
return True
for perm in perms:
if perm in ((permissions.ACTION_CAN_READ, permissions.
RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.
RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.
RESOURCE_DAG)):
can_access_all_dags = self.has_access(*perm)
if can_access_all_dags:
continue
action = perm[0]
if self.can_access_some_dags(action, dag_id):
continue
return False
elif not self.has_access(*perm):
return False
return True
class FakeAppBuilder:
"""Stand-in class to replace a Flask App Builder.
The only purpose is to provide the ``self.appbuilder.get_session`` interface
for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask
app, which is slow to create.
"""
def __init__(self, session: (Session | None)=None) ->None:
self.get_session = session
class ApplessAirflowSecurityManager(AirflowSecurityManager):
"""Security Manager that doesn't need the whole flask app."""
def __init__(self, session: (Session | None)=None):
self.appbuilder = FakeAppBuilder(session)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
EXISTING_ROLES = {'Admin', 'Viewer', 'User', 'Op', 'Public'}
if TYPE_CHECKING:
from sqlalchemy.orm import Session
SecurityManagerOverride: type = object
else:
SecurityManagerOverride = get_auth_manager(
).get_security_manager_override_class()
class AirflowSecurityManager(SecurityManagerOverride, SecurityManager,
LoggingMixin):
"""Custom security manager, which introduces a permission model adapted to Airflow."""
VIEWER_PERMISSIONS = [(permissions.ACTION_CAN_READ, permissions.
RESOURCE_AUDIT_LOG), (permissions.ACTION_CAN_READ, permissions.
RESOURCE_DAG), (permissions.ACTION_CAN_READ, permissions.
RESOURCE_DAG_DEPENDENCIES), (permissions.ACTION_CAN_READ,
permissions.RESOURCE_DAG_CODE), (permissions.ACTION_CAN_READ,
permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_READ,
permissions.RESOURCE_DATASET), (permissions.ACTION_CAN_READ,
permissions.RESOURCE_CLUSTER_ACTIVITY), (permissions.
ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR), (permissions.
ACTION_CAN_READ, permissions.RESOURCE_DAG_WARNING), (permissions.
ACTION_CAN_READ, permissions.RESOURCE_JOB), (permissions.
ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD), (permissions.
ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD), (permissions.
ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE), (permissions.
ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE), (permissions.
ACTION_CAN_READ, permissions.RESOURCE_PLUGIN), (permissions.
ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS), (permissions.
ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE), (permissions.
ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG), (permissions.
ACTION_CAN_READ, permissions.RESOURCE_XCOM), (permissions.
ACTION_CAN_READ, permissions.RESOURCE_WEBSITE), (permissions.
ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU), (
permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG), (
permissions.ACTION_CAN_ACCESS_MENU, permissions.
RESOURCE_DAG_DEPENDENCIES), (permissions.ACTION_CAN_ACCESS_MENU,
permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_ACCESS_MENU,
permissions.RESOURCE_DATASET), (permissions.ACTION_CAN_ACCESS_MENU,
permissions.RESOURCE_CLUSTER_ACTIVITY), (permissions.
ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS), (permissions.
ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU), (
permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB), (
permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.
RESOURCE_TASK_INSTANCE)]
USER_PERMISSIONS = [(permissions.ACTION_CAN_EDIT, permissions.
RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.
RESOURCE_DAG), (permissions.ACTION_CAN_CREATE, permissions.
RESOURCE_TASK_INSTANCE), (permissions.ACTION_CAN_EDIT, permissions.
RESOURCE_TASK_INSTANCE), (permissions.ACTION_CAN_DELETE,
permissions.RESOURCE_TASK_INSTANCE), (permissions.ACTION_CAN_CREATE,
permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_EDIT,
permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_DELETE,
permissions.RESOURCE_DAG_RUN)]
OP_PERMISSIONS = [(permissions.ACTION_CAN_READ, permissions.
RESOURCE_CONFIG), (permissions.ACTION_CAN_ACCESS_MENU, permissions.
RESOURCE_ADMIN_MENU), (permissions.ACTION_CAN_ACCESS_MENU,
permissions.RESOURCE_CONFIG), (permissions.ACTION_CAN_ACCESS_MENU,
permissions.RESOURCE_CONNECTION), (permissions.
ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL), (permissions.
ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE), (
permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM), (
permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION), (
permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION), (
permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION), (
permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION), (
permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL), (
permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL), (
permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL), (
permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL), (
permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER), (
permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE), (
permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE), (
permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE), (
permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE), (
permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM)]
ADMIN_PERMISSIONS = [(permissions.ACTION_CAN_READ, permissions.
RESOURCE_TASK_RESCHEDULE), (permissions.ACTION_CAN_ACCESS_MENU,
permissions.RESOURCE_TASK_RESCHEDULE), (permissions.ACTION_CAN_READ,
permissions.RESOURCE_TRIGGER), (permissions.ACTION_CAN_ACCESS_MENU,
permissions.RESOURCE_TRIGGER), (permissions.ACTION_CAN_READ,
permissions.RESOURCE_PASSWORD), (permissions.ACTION_CAN_EDIT,
permissions.RESOURCE_PASSWORD), (permissions.ACTION_CAN_READ,
permissions.RESOURCE_ROLE), (permissions.ACTION_CAN_EDIT,
permissions.RESOURCE_ROLE)]
DAG_RESOURCES = {permissions.RESOURCE_DAG}
DAG_ACTIONS = permissions.DAG_ACTIONS
ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},
{'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {
'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +
OP_PERMISSIONS + ADMIN_PERMISSIONS}]
actionmodelview = ActionModelView
permissionmodelview = PermissionPairModelView
rolemodelview = CustomRoleModelView
resourcemodelview = ResourceModelView
userdbmodelview = CustomUserDBModelView
resetmypasswordview = CustomResetMyPasswordView
resetpasswordview = CustomResetPasswordView
userinfoeditview = CustomUserInfoEditView
userldapmodelview = CustomUserLDAPModelView
useroauthmodelview = CustomUserOAuthModelView
userremoteusermodelview = CustomUserRemoteUserModelView
useroidmodelview = CustomUserOIDModelView
userstatschartview = CustomUserStatsChartView
def __init__(self, appbuilder) ->None:
super().__init__(appbuilder=appbuilder, actionmodelview=self.
actionmodelview, authdbview=self.authdbview, authldapview=self.
authldapview, authoauthview=self.authoauthview, authoidview=
self.authoidview, authremoteuserview=self.authremoteuserview,
permissionmodelview=self.permissionmodelview, registeruser_view
=self.registeruser_view, registeruserdbview=self.
registeruserdbview, registeruseroauthview=self.
registeruseroauthview, registerusermodelview=self.
registerusermodelview, registeruseroidview=self.
registeruseroidview, resetmypasswordview=self.
resetmypasswordview, resetpasswordview=self.resetpasswordview,
rolemodelview=self.rolemodelview, user_model=self.user_model,
userinfoeditview=self.userinfoeditview, userdbmodelview=self.
userdbmodelview, userldapmodelview=self.userldapmodelview,
useroauthmodelview=self.useroauthmodelview, useroidmodelview=
self.useroidmodelview, userremoteusermodelview=self.
userremoteusermodelview, userstatschartview=self.userstatschartview
)
for attr in dir(self):
if not attr.endswith('view'):
continue
view = getattr(self, attr, None)
if not view or not getattr(view, 'datamodel', None):
continue
view.datamodel = CustomSQLAInterface(view.datamodel.obj)
self.perms = None
def _get_root_dag_id(self, dag_id: str) ->str:
if '.' in dag_id:
dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,
DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()
return dm.root_dag_id or dm.dag_id
return dag_id
def init_role(self, role_name, perms) ->None:
"""
Initialize the role with actions and related resources.
:param role_name:
:param perms:
:return:
"""
warnings.warn(
'`init_role` has been deprecated. Please use `bulk_sync_roles` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
self.bulk_sync_roles([{'role': role_name, 'perms': perms}])
def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) ->None:
"""Sync the provided roles and permissions."""
existing_roles = self._get_all_roles_with_permissions()
non_dag_perms = self._get_all_non_dag_permissions()
for config in roles:
role_name = config['role']
perms = config['perms']
role = existing_roles.get(role_name) or self.add_role(role_name)
for action_name, resource_name in perms:
perm = non_dag_perms.get((action_name, resource_name)
) or self.create_permission(action_name, resource_name)
if perm not in role.permissions:
self.add_permission_to_role(role, perm)
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
return user.roles
def get_readable_dags(self, user) ->Iterable[DagModel]:
"""Gets the DAGs readable by authenticated user."""
warnings.warn(
'`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_READ], user
)
def get_editable_dags(self, user) ->Iterable[DagModel]:
"""Gets the DAGs editable by authenticated user."""
warnings.warn(
'`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
with warnings.catch_warnings():
warnings.simplefilter('ignore', RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user
)
@provide_session
def get_accessible_dags(self, user_actions: (Container[str] | None),
user, session: Session=NEW_SESSION) ->Iterable[DagModel]:
warnings.warn(
'`get_accessible_dags` has been deprecated. Please use `get_accessible_dag_ids` instead.'
, RemovedInAirflow3Warning, stacklevel=3)
dag_ids = self.get_accessible_dag_ids(user, user_actions, session)
return session.scalars(select(DagModel).where(DagModel.dag_id.in_(
dag_ids)))
def get_readable_dag_ids(self, user) ->set[str]:
"""Gets the DAG IDs readable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])
def get_editable_dag_ids(self, user) ->set[str]:
"""Gets the DAG IDs editable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])
@provide_session
def get_accessible_dag_ids(self, user, user_actions: (Container[str] |
None)=None, session: Session=NEW_SESSION) ->set[str]:
"""Generic function to get readable or writable DAGs for user."""
if not user_actions:
user_actions = [permissions.ACTION_CAN_EDIT, permissions.
ACTION_CAN_READ]
if not get_auth_manager().is_logged_in():
roles = user.roles
else:
if (permissions.ACTION_CAN_EDIT in user_actions and self.
can_edit_all_dags(user) or permissions.ACTION_CAN_READ in
user_actions and self.can_read_all_dags(user)):
return {dag.dag_id for dag in session.execute(select(
DagModel.dag_id))}
user_query = session.scalar(select(User).options(joinedload(
User.roles).subqueryload(Role.permissions).options(
joinedload(Permission.action), joinedload(Permission.
resource))).where(User.id == user.id))
roles = user_query.roles
resources = set()
for role in roles:
for permission in role.permissions:
action = permission.action.name
if action not in user_actions:
continue
resource = permission.resource.name
if resource == permissions.RESOURCE_DAG:
return {dag.dag_id for dag in session.execute(select(
DagModel.dag_id))}
if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
resources.add(resource[len(permissions.
RESOURCE_DAG_PREFIX):])
else:
resources.add(resource)
return {dag.dag_id for dag in session.execute(select(DagModel.
dag_id).where(DagModel.dag_id.in_(resources)))}
def can_access_some_dags(self, action: str, dag_id: (str | None)=None
) ->bool:
"""Checks if user has read or write access to some dags."""
if dag_id and dag_id != '~':
root_dag_id = self._get_root_dag_id(dag_id)
return self.has_access(action, permissions.
resource_name_for_dag(root_dag_id))
user = g.user
if action == permissions.ACTION_CAN_READ:
return any(self.get_readable_dag_ids(user))
return any(self.get_editable_dag_ids(user))
def can_read_dag(self, dag_id: str, user=None) ->bool:
"""Determines whether a user has DAG read access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_READ,
dag_resource_name, user=user)
def can_edit_dag(self, dag_id: str, user=None) ->bool:
"""Determines whether a user has DAG edit access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_EDIT,
dag_resource_name, user=user)
def can_delete_dag(self, dag_id: str, user=None) ->bool:
"""Determines whether a user has DAG delete access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_DELETE,
dag_resource_name, user=user)
def prefixed_dag_id(self, dag_id: str) ->str:
"""Returns the permission name for a DAG id."""
warnings.warn(
'`prefixed_dag_id` has been deprecated. Please use `airflow.security.permissions.resource_name_for_dag` instead.'
, RemovedInAirflow3Warning, stacklevel=2)
root_dag_id = self._get_root_dag_id(dag_id)
return permissions.resource_name_for_dag(root_dag_id)
def is_dag_resource(self, resource_name: str) ->bool:
"""Determines if a resource belongs to a DAG or all DAGs."""
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
def has_access(self, action_name: str, resource_name: str, user=None
) ->bool:
"""
Verify whether a given user could perform a certain action on the given resource.
Example actions might include can_read, can_write, can_delete, etc.
:param action_name: action_name on resource (e.g can_read, can_edit).
:param resource_name: name of view-menu or resource.
:param user: user name
:return: Whether user could perform certain action on the resource.
:rtype bool
"""
if not user:
user = g.user
if (action_name, resource_name) in user.perms:
return True
if self.is_dag_resource(resource_name):
if (action_name, permissions.RESOURCE_DAG) in user.perms:
return True
return (action_name, resource_name) in user.perms
return False
def _has_role(self, role_name_or_list: Container, user) ->bool:
"""Whether the user has this role name."""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(r.name in role_name_or_list for r in user.roles)
def has_all_dags_access(self, user) ->bool:
"""
Has all the dag access in any of the 3 cases.
1. Role needs to be in (Admin, Viewer, User, Op).
2. Has can_read action on dags resource.
3. Has can_edit action on dags resource.
"""
if not user:
user = g.user
return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user
) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)
def can_edit_all_dags(self, user=None) ->bool:
"""Has can_edit action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_EDIT, permissions.
RESOURCE_DAG, user)
def can_read_all_dags(self, user=None) ->bool:
"""Has can_read action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_READ, permissions.
RESOURCE_DAG, user)
def clean_perms(self) ->None:
"""FAB leaves faulty permissions that need to be cleaned up."""
self.log.debug('Cleaning faulty perms')
sesh = self.appbuilder.get_session
perms = sesh.query(Permission).filter(or_(Permission.action == None,
Permission.resource == None))
deleted_count = 0
for perm in perms:
sesh.delete(perm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info('Deleted %s faulty permissions', deleted_count)
def _merge_perm(self, action_name: str, resource_name: str) ->None:
"""
Add the new (action, resource) to assoc_permission_role if it doesn't exist.
It will add the related entry to ab_permission and ab_resource two meta tables as well.
:param action_name: Name of the action
:param resource_name: Name of the resource
:return:
"""
action = self.get_action(action_name)
resource = self.get_resource(resource_name)
perm = None
if action and resource:
perm = self.appbuilder.get_session.scalar(select(self.
permission_model).filter_by(action=action, resource=
resource).limit(1))
if not perm and action_name and resource_name:
self.create_permission(action_name, resource_name)
def add_homepage_access_to_custom_roles(self) ->None:
"""
Add Website.can_read access to all custom roles.
:return: None.
"""
website_permission = self.create_permission(permissions.
ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
custom_roles = [role for role in self.get_all_roles() if role.name
not in EXISTING_ROLES]
for role in custom_roles:
self.add_permission_to_role(role, website_permission)
self.appbuilder.get_session.commit()
def get_all_permissions(self) ->set[tuple[str, str]]:
"""Returns all permissions as a set of tuples with the action and resource names."""
return set(self.appbuilder.get_session.execute(select(self.
action_model.name, self.resource_model.name).join(self.
permission_model.action).join(self.permission_model.resource)))
def _get_all_non_dag_permissions(self) ->dict[tuple[str, str], Permission]:
"""
Get permissions except those that are for specific DAGs.
Returns a dict with a key of (action_name, resource_name) and value of permission
with all permissions except those that are for specific DAGs.
"""
return {(action_name, resource_name): viewmodel for action_name,
resource_name, viewmodel in self.appbuilder.get_session.execute
(select(self.action_model.name, self.resource_model.name, self.
permission_model).join(self.permission_model.action).join(self.
permission_model.resource).where(~self.resource_model.name.like
(f'{permissions.RESOURCE_DAG_PREFIX}%')))}
def _get_all_roles_with_permissions(self) ->dict[str, Role]:
"""Returns a dict with a key of role name and value of role with early loaded permissions."""
return {r.name: r for r in self.appbuilder.get_session.scalars(
select(self.role_model).options(joinedload(self.role_model.
permissions))).unique()}
def create_dag_specific_permissions(self) ->None:
"""
Add permissions to all DAGs.
Creates 'can_read', 'can_edit', and 'can_delete' permissions for all
DAGs, along with any `access_control` permissions provided in them.
This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`
if you only need to sync a single DAG.
:return: None.
"""
perms = self.get_all_permissions()
dagbag = DagBag(read_dags_from_db=True)
dagbag.collect_dags_from_db()
dags = dagbag.dags.values()
for dag in dags:
root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag
.dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
for action_name in self.DAG_ACTIONS:
if (action_name, dag_resource_name) not in perms:
self._merge_perm(action_name, dag_resource_name)
if dag.access_control:
self.sync_perm_for_dag(dag_resource_name, dag.access_control)
def update_admin_permission(self) ->None:
"""
Add missing permissions to the table for admin.
Admin should get all the permissions, except the dag permissions
because Admin already has Dags permission.
Add the missing ones to the table for admin.
:return: None.
"""
session = self.appbuilder.get_session
dag_resources = session.scalars(select(Resource).where(Resource.
name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))
resource_ids = [resource.id for resource in dag_resources]
perms = session.scalars(select(Permission).where(~Permission.
resource_id.in_(resource_ids)))
perms = [p for p in perms if p.action and p.resource]
admin = self.find_role('Admin')
admin.permissions = list(set(admin.permissions) | set(perms))
session.commit()
def sync_roles(self) ->None:
"""
Initialize default and custom roles with related permissions.
1. Init the default role(Admin, Viewer, User, Op, public)
with related permissions.
2. Init the custom role(dag-user) with related permissions.
:return: None.
"""
self.create_perm_vm_for_all_dag()
self.bulk_sync_roles(self.ROLE_CONFIGS)
self.add_homepage_access_to_custom_roles()
self.update_admin_permission()
self.clean_perms()
def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |
None)=None) ->None:
"""Populates resource-based permissions."""
if not perms:
return
for action_name, resource_name in perms:
self.create_resource(resource_name)
self.create_permission(action_name, resource_name)
def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,
Collection[str]] | None)=None) ->None:
"""
Sync permissions for given dag id.
The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g.,
{'can_read'}
:return:
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
for dag_action_name in self.DAG_ACTIONS:
self.create_permission(dag_action_name, dag_resource_name)
if access_control is not None:
self.log.info("Syncing DAG-level permissions for DAG '%s'",
dag_resource_name)
self._sync_dag_view_permissions(dag_resource_name, access_control)
else:
self.log.info(
"Not syncing DAG-level permissions for DAG '%s' as access control is unset."
, dag_resource_name)
def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[
str, Collection[str]]) ->None:
"""
Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:param access_control: a dict where each key is a rolename and
each value is a set() of action names (e.g. {'can_read'})
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
def _get_or_create_dag_permission(action_name: str) ->(Permission |
None):
perm = self.get_permission(action_name, dag_resource_name)
if not perm:
self.log.info("Creating new action '%s' on resource '%s'",
action_name, dag_resource_name)
perm = self.create_permission(action_name, dag_resource_name)
return perm
def _revoke_stale_permissions(resource: Resource):
existing_dag_perms = self.get_resource_permissions(resource)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role if role.name !=
'Admin']
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, ())
if perm.action.name not in target_perms_for_role:
self.log.info("Revoking '%s' on DAG '%s' for role '%s'"
, perm.action, dag_resource_name, role.name)
self.remove_permission_from_role(role, perm)
resource = self.get_resource(dag_resource_name)
if resource:
_revoke_stale_permissions(resource)
for rolename, action_names in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
f"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist"
)
action_names = set(action_names)
invalid_action_names = action_names - self.DAG_ACTIONS
if invalid_action_names:
raise AirflowException(
f"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}"
)
for action_name in action_names:
dag_perm = _get_or_create_dag_permission(action_name)
if dag_perm:
self.add_permission_to_role(role, dag_perm)
def create_perm_vm_for_all_dag(self) ->None:
"""Create perm-vm if not exist and insert into FAB security model for all-dags."""
for resource_name in self.DAG_RESOURCES:
for action_name in self.DAG_ACTIONS:
self._merge_perm(action_name, resource_name)
def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)
=None, dag_id: (str | None)=None) ->bool:
"""Checks that the logged in user has the specified permissions."""
if not perms:
return True
for perm in perms:
if perm in ((permissions.ACTION_CAN_READ, permissions.
RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.
RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.
RESOURCE_DAG)):
can_access_all_dags = self.has_access(*perm)
if can_access_all_dags:
continue
action = perm[0]
if self.can_access_some_dags(action, dag_id):
continue
return False
elif not self.has_access(*perm):
return False
return True
class FakeAppBuilder:
"""Stand-in class to replace a Flask App Builder.
The only purpose is to provide the ``self.appbuilder.get_session`` interface
for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask
app, which is slow to create.
"""
def __init__(self, session: (Session | None)=None) ->None:
self.get_session = session
class ApplessAirflowSecurityManager(AirflowSecurityManager):
"""Security Manager that doesn't need the whole flask app."""
def __init__(self, session: (Session | None)=None):
self.appbuilder = FakeAppBuilder(session)
<|reserved_special_token_1|>
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any, Collection, Container, Iterable, Sequence
from flask import g
from sqlalchemy import or_, select
from sqlalchemy.orm import joinedload
from airflow.auth.managers.fab.models import Permission, Resource, Role, User
from airflow.auth.managers.fab.views.permissions import (
ActionModelView,
PermissionPairModelView,
ResourceModelView,
)
from airflow.auth.managers.fab.views.roles_list import CustomRoleModelView
from airflow.auth.managers.fab.views.user import (
CustomUserDBModelView,
CustomUserLDAPModelView,
CustomUserOAuthModelView,
CustomUserOIDModelView,
CustomUserRemoteUserModelView,
)
from airflow.auth.managers.fab.views.user_edit import (
CustomResetMyPasswordView,
CustomResetPasswordView,
CustomUserInfoEditView,
)
from airflow.auth.managers.fab.views.user_stats import CustomUserStatsChartView
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning
from airflow.models import DagBag, DagModel
from airflow.security import permissions
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.www.extensions.init_auth_manager import get_auth_manager
from airflow.www.fab_security.sqla.manager import SecurityManager
from airflow.www.utils import CustomSQLAInterface
EXISTING_ROLES = {
"Admin",
"Viewer",
"User",
"Op",
"Public",
}
if TYPE_CHECKING:
from sqlalchemy.orm import Session
SecurityManagerOverride: type = object
else:
# Fetch the security manager override from the auth manager
SecurityManagerOverride = get_auth_manager().get_security_manager_override_class()
class AirflowSecurityManager(SecurityManagerOverride, SecurityManager, LoggingMixin):
"""Custom security manager, which introduces a permission model adapted to Airflow."""
###########################################################################
# PERMISSIONS
###########################################################################
# [START security_viewer_perms]
VIEWER_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CLUSTER_ACTIVITY),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_WARNING),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_DEPENDENCIES),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DATASET),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CLUSTER_ACTIVITY),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),
]
# [END security_viewer_perms]
# [START security_user_perms]
USER_PERMISSIONS = [
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
]
# [END security_user_perms]
# [START security_op_perms]
OP_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_ADMIN_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONFIG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM),
]
# [END security_op_perms]
ADMIN_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TRIGGER),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TRIGGER),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PASSWORD),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_PASSWORD),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE),
]
# global resource for dag-level access
DAG_RESOURCES = {permissions.RESOURCE_DAG}
DAG_ACTIONS = permissions.DAG_ACTIONS
###########################################################################
# DEFAULT ROLE CONFIGURATIONS
###########################################################################
ROLE_CONFIGS: list[dict[str, Any]] = [
{"role": "Public", "perms": []},
{"role": "Viewer", "perms": VIEWER_PERMISSIONS},
{
"role": "User",
"perms": VIEWER_PERMISSIONS + USER_PERMISSIONS,
},
{
"role": "Op",
"perms": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS,
},
{
"role": "Admin",
"perms": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS + ADMIN_PERMISSIONS,
},
]
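    # A minimal sketch of extending these defaults (the "auditor" role name is an
    # assumption, not part of stock Airflow): a deployment can subclass the manager,
    # append its own entry, and let sync_roles() materialize it.
    #
    #     class CustomSecurityManager(AirflowSecurityManager):
    #         ROLE_CONFIGS = AirflowSecurityManager.ROLE_CONFIGS + [
    #             {"role": "auditor", "perms": AirflowSecurityManager.VIEWER_PERMISSIONS},
    #         ]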
actionmodelview = ActionModelView
permissionmodelview = PermissionPairModelView
rolemodelview = CustomRoleModelView
resourcemodelview = ResourceModelView
userdbmodelview = CustomUserDBModelView
resetmypasswordview = CustomResetMyPasswordView
resetpasswordview = CustomResetPasswordView
userinfoeditview = CustomUserInfoEditView
userldapmodelview = CustomUserLDAPModelView
useroauthmodelview = CustomUserOAuthModelView
userremoteusermodelview = CustomUserRemoteUserModelView
useroidmodelview = CustomUserOIDModelView
userstatschartview = CustomUserStatsChartView
def __init__(self, appbuilder) -> None:
super().__init__(
appbuilder=appbuilder,
actionmodelview=self.actionmodelview,
authdbview=self.authdbview,
authldapview=self.authldapview,
authoauthview=self.authoauthview,
authoidview=self.authoidview,
authremoteuserview=self.authremoteuserview,
permissionmodelview=self.permissionmodelview,
registeruser_view=self.registeruser_view,
registeruserdbview=self.registeruserdbview,
registeruseroauthview=self.registeruseroauthview,
registerusermodelview=self.registerusermodelview,
registeruseroidview=self.registeruseroidview,
resetmypasswordview=self.resetmypasswordview,
resetpasswordview=self.resetpasswordview,
rolemodelview=self.rolemodelview,
user_model=self.user_model,
userinfoeditview=self.userinfoeditview,
userdbmodelview=self.userdbmodelview,
userldapmodelview=self.userldapmodelview,
useroauthmodelview=self.useroauthmodelview,
useroidmodelview=self.useroidmodelview,
userremoteusermodelview=self.userremoteusermodelview,
userstatschartview=self.userstatschartview,
)
# Go and fix up the SQLAInterface used from the stock one to our subclass.
# This is needed to support the "hack" where we had to edit
# FieldConverter.conversion_table in place in airflow.www.utils
for attr in dir(self):
if not attr.endswith("view"):
continue
view = getattr(self, attr, None)
if not view or not getattr(view, "datamodel", None):
continue
view.datamodel = CustomSQLAInterface(view.datamodel.obj)
self.perms = None
def _get_root_dag_id(self, dag_id: str) -> str:
if "." in dag_id:
dm = self.appbuilder.get_session.execute(
select(DagModel.dag_id, DagModel.root_dag_id).where(DagModel.dag_id == dag_id)
).one()
return dm.root_dag_id or dm.dag_id
return dag_id
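    # Illustrative behavior of the helper above: a SubDAG id such as
    # "parent_dag.section_1" contains a "." and resolves to the stored
    # root_dag_id ("parent_dag"), so permissions are always checked against the
    # parent DAG; a plain id like "my_dag" is returned unchanged. The ids are
    # assumptions used only for illustration.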
def init_role(self, role_name, perms) -> None:
"""
Initialize the role with actions and related resources.
        :param role_name: name of the role to create or update
        :param perms: iterable of (action_name, resource_name) tuples
        :return:
"""
warnings.warn(
"`init_role` has been deprecated. Please use `bulk_sync_roles` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
self.bulk_sync_roles([{"role": role_name, "perms": perms}])
def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) -> None:
"""Sync the provided roles and permissions."""
existing_roles = self._get_all_roles_with_permissions()
non_dag_perms = self._get_all_non_dag_permissions()
for config in roles:
role_name = config["role"]
perms = config["perms"]
role = existing_roles.get(role_name) or self.add_role(role_name)
for action_name, resource_name in perms:
perm = non_dag_perms.get((action_name, resource_name)) or self.create_permission(
action_name, resource_name
)
if perm not in role.permissions:
self.add_permission_to_role(role, perm)
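    # A minimal usage sketch (the role name and permission pair are assumptions):
    #
    #     security_manager.bulk_sync_roles(
    #         [{"role": "metrics-reader",
    #           "perms": [(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)]}]
    #     )
    #
    # Existing roles and (action, resource) pairs are reused; missing ones are created.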
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
return user.roles
def get_readable_dags(self, user) -> Iterable[DagModel]:
"""Gets the DAGs readable by authenticated user."""
warnings.warn(
"`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)
def get_editable_dags(self, user) -> Iterable[DagModel]:
"""Gets the DAGs editable by authenticated user."""
warnings.warn(
"`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)
@provide_session
def get_accessible_dags(
self,
user_actions: Container[str] | None,
user,
session: Session = NEW_SESSION,
) -> Iterable[DagModel]:
warnings.warn(
"`get_accessible_dags` has been deprecated. Please use `get_accessible_dag_ids` instead.",
RemovedInAirflow3Warning,
stacklevel=3,
)
dag_ids = self.get_accessible_dag_ids(user, user_actions, session)
return session.scalars(select(DagModel).where(DagModel.dag_id.in_(dag_ids)))
def get_readable_dag_ids(self, user) -> set[str]:
"""Gets the DAG IDs readable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])
def get_editable_dag_ids(self, user) -> set[str]:
"""Gets the DAG IDs editable by authenticated user."""
return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])
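    # Both helpers above reduce to get_accessible_dag_ids with a single action;
    # e.g. security_manager.get_readable_dag_ids(g.user) might return
    # {"dag_a", "dag_b"} (illustrative ids).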
@provide_session
def get_accessible_dag_ids(
self,
user,
user_actions: Container[str] | None = None,
session: Session = NEW_SESSION,
) -> set[str]:
"""Generic function to get readable or writable DAGs for user."""
if not user_actions:
user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
if not get_auth_manager().is_logged_in():
roles = user.roles
else:
if (permissions.ACTION_CAN_EDIT in user_actions and self.can_edit_all_dags(user)) or (
permissions.ACTION_CAN_READ in user_actions and self.can_read_all_dags(user)
):
return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}
user_query = session.scalar(
select(User)
.options(
joinedload(User.roles)
.subqueryload(Role.permissions)
.options(joinedload(Permission.action), joinedload(Permission.resource))
)
.where(User.id == user.id)
)
roles = user_query.roles
resources = set()
for role in roles:
for permission in role.permissions:
action = permission.action.name
if action not in user_actions:
continue
resource = permission.resource.name
if resource == permissions.RESOURCE_DAG:
return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}
if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])
else:
resources.add(resource)
return {
dag.dag_id
for dag in session.execute(select(DagModel.dag_id).where(DagModel.dag_id.in_(resources)))
}
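    # Illustrative walk-through of the loop above, assuming the conventional
    # "DAG:" value for permissions.RESOURCE_DAG_PREFIX: a role permission
    # ("can_read", "DAG:example_dag") contributes "example_dag" to `resources`,
    # while ("can_read", permissions.RESOURCE_DAG) short-circuits and returns
    # every dag_id in the DagModel table.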
def can_access_some_dags(self, action: str, dag_id: str | None = None) -> bool:
"""Checks if user has read or write access to some dags."""
if dag_id and dag_id != "~":
root_dag_id = self._get_root_dag_id(dag_id)
return self.has_access(action, permissions.resource_name_for_dag(root_dag_id))
user = g.user
if action == permissions.ACTION_CAN_READ:
return any(self.get_readable_dag_ids(user))
return any(self.get_editable_dag_ids(user))
def can_read_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG read access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)
def can_edit_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG edit access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_EDIT, dag_resource_name, user=user)
def can_delete_dag(self, dag_id: str, user=None) -> bool:
"""Determines whether a user has DAG delete access."""
root_dag_id = self._get_root_dag_id(dag_id)
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
return self.has_access(permissions.ACTION_CAN_DELETE, dag_resource_name, user=user)
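    # The three checks above all resolve the root DAG first, so for a SubDAG they
    # are equivalent to a has_access call on the parent (illustrative; the id
    # "parent_dag.section_1" is an assumption):
    #
    #     security_manager.can_read_dag("parent_dag.section_1")
    #     # == security_manager.has_access(
    #     #        permissions.ACTION_CAN_READ,
    #     #        permissions.resource_name_for_dag("parent_dag"))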
def prefixed_dag_id(self, dag_id: str) -> str:
"""Returns the permission name for a DAG id."""
warnings.warn(
"`prefixed_dag_id` has been deprecated. "
"Please use `airflow.security.permissions.resource_name_for_dag` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
root_dag_id = self._get_root_dag_id(dag_id)
return permissions.resource_name_for_dag(root_dag_id)
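    # Migration sketch for the deprecation above (assumes the conventional
    # "DAG:" prefix):
    #
    #     from airflow.security import permissions
    #     permissions.resource_name_for_dag("example_dag")  # -> "DAG:example_dag"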
def is_dag_resource(self, resource_name: str) -> bool:
"""Determines if a resource belongs to a DAG or all DAGs."""
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
def has_access(self, action_name: str, resource_name: str, user=None) -> bool:
"""
Verify whether a given user could perform a certain action on the given resource.
Example actions might include can_read, can_write, can_delete, etc.
        :param action_name: name of the action on the resource (e.g. can_read, can_edit).
        :param resource_name: name of view-menu or resource.
        :param user: the user to check; defaults to the currently logged-in user.
        :return: Whether the user could perform the action on the resource.
        :rtype: bool
"""
if not user:
user = g.user
if (action_name, resource_name) in user.perms:
return True
if self.is_dag_resource(resource_name):
if (action_name, permissions.RESOURCE_DAG) in user.perms:
return True
return (action_name, resource_name) in user.perms
return False
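    # Illustrative example of the DAG fallback branch above: a user holding
    # (can_read, permissions.RESOURCE_DAG), the global DAG resource, passes
    # has_access("can_read", "DAG:example_dag") even without a per-DAG entry.
    # The "DAG:example_dag" resource name assumes the conventional prefix.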
def _has_role(self, role_name_or_list: Container, user) -> bool:
"""Whether the user has this role name."""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(r.name in role_name_or_list for r in user.roles)
def has_all_dags_access(self, user) -> bool:
"""
        Has access to all DAGs in any of the 3 cases.
1. Role needs to be in (Admin, Viewer, User, Op).
2. Has can_read action on dags resource.
3. Has can_edit action on dags resource.
"""
if not user:
user = g.user
return (
self._has_role(["Admin", "Viewer", "Op", "User"], user)
or self.can_read_all_dags(user)
or self.can_edit_all_dags(user)
)
def can_edit_all_dags(self, user=None) -> bool:
"""Has can_edit action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)
def can_read_all_dags(self, user=None) -> bool:
"""Has can_read action on DAG resource."""
return self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)
def clean_perms(self) -> None:
"""FAB leaves faulty permissions that need to be cleaned up."""
self.log.debug("Cleaning faulty perms")
sesh = self.appbuilder.get_session
perms = sesh.query(Permission).filter(
or_(
Permission.action == None, # noqa
Permission.resource == None, # noqa
)
)
# Since FAB doesn't define ON DELETE CASCADE on these tables, we need
# to delete the _object_ so that SQLA knows to delete the many-to-many
# relationship object too. :(
deleted_count = 0
for perm in perms:
sesh.delete(perm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info("Deleted %s faulty permissions", deleted_count)
def _merge_perm(self, action_name: str, resource_name: str) -> None:
"""
Add the new (action, resource) to assoc_permission_role if it doesn't exist.
        It will also add the related entries to the ab_permission and ab_resource metadata tables.
:param action_name: Name of the action
:param resource_name: Name of the resource
:return:
"""
action = self.get_action(action_name)
resource = self.get_resource(resource_name)
perm = None
if action and resource:
perm = self.appbuilder.get_session.scalar(
select(self.permission_model).filter_by(action=action, resource=resource).limit(1)
)
if not perm and action_name and resource_name:
self.create_permission(action_name, resource_name)
def add_homepage_access_to_custom_roles(self) -> None:
"""
Add Website.can_read access to all custom roles.
:return: None.
"""
website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES]
for role in custom_roles:
self.add_permission_to_role(role, website_permission)
self.appbuilder.get_session.commit()
def get_all_permissions(self) -> set[tuple[str, str]]:
"""Returns all permissions as a set of tuples with the action and resource names."""
return set(
self.appbuilder.get_session.execute(
select(self.action_model.name, self.resource_model.name)
.join(self.permission_model.action)
.join(self.permission_model.resource)
)
)
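    # Illustrative shape of the returned set (the concrete names are assumptions):
    #
    #     {("can_read", "Website"), ("can_edit", "DAG:example_dag"), ...}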
def _get_all_non_dag_permissions(self) -> dict[tuple[str, str], Permission]:
"""
Get permissions except those that are for specific DAGs.
Returns a dict with a key of (action_name, resource_name) and value of permission
with all permissions except those that are for specific DAGs.
"""
return {
(action_name, resource_name): viewmodel
for action_name, resource_name, viewmodel in (
self.appbuilder.get_session.execute(
select(self.action_model.name, self.resource_model.name, self.permission_model)
.join(self.permission_model.action)
.join(self.permission_model.resource)
.where(~self.resource_model.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
)
)
}
def _get_all_roles_with_permissions(self) -> dict[str, Role]:
"""Returns a dict with a key of role name and value of role with early loaded permissions."""
return {
r.name: r
for r in self.appbuilder.get_session.scalars(
select(self.role_model).options(joinedload(self.role_model.permissions))
).unique()
}
def create_dag_specific_permissions(self) -> None:
"""
Add permissions to all DAGs.
Creates 'can_read', 'can_edit', and 'can_delete' permissions for all
DAGs, along with any `access_control` permissions provided in them.
This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`
if you only need to sync a single DAG.
:return: None.
"""
perms = self.get_all_permissions()
dagbag = DagBag(read_dags_from_db=True)
dagbag.collect_dags_from_db()
dags = dagbag.dags.values()
for dag in dags:
root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
for action_name in self.DAG_ACTIONS:
if (action_name, dag_resource_name) not in perms:
self._merge_perm(action_name, dag_resource_name)
if dag.access_control:
self.sync_perm_for_dag(dag_resource_name, dag.access_control)
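    # Illustrative effect of the loop above for a DAG declaring, e.g.,
    # access_control={"analyst": {"can_read"}} (the role name is an assumption):
    # the standard per-DAG permissions are merged first, then the mapping is
    # handed to sync_perm_for_dag.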
def update_admin_permission(self) -> None:
"""
Add missing permissions to the table for admin.
        Admin should get all the permissions, except the per-DAG permissions,
        because Admin already holds the global DAGs permission.
        Add the missing ones to the table for admin.
:return: None.
"""
session = self.appbuilder.get_session
dag_resources = session.scalars(
select(Resource).where(Resource.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
)
resource_ids = [resource.id for resource in dag_resources]
perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))
perms = [p for p in perms if p.action and p.resource]
admin = self.find_role("Admin")
admin.permissions = list(set(admin.permissions) | set(perms))
session.commit()
def sync_roles(self) -> None:
"""
        Initialize default and custom roles with related permissions.

        1. Initialize the default roles (Admin, Viewer, User, Op, Public)
           with related permissions.
        2. Initialize the custom roles (dag-user) with related permissions.

        :return: None.
"""
# Create global all-dag permissions
self.create_perm_vm_for_all_dag()
        # Sync the default roles (Admin, Viewer, User, Op, Public) with related permissions
self.bulk_sync_roles(self.ROLE_CONFIGS)
self.add_homepage_access_to_custom_roles()
        # Initialize existing roles; any remaining roles can be created through the UI.
self.update_admin_permission()
self.clean_perms()
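
    # Usage sketch (editor's addition): sync_roles() is the one-shot entry point,
    # typically run at webserver initialization or via the `airflow sync-perm`
    # CLI command; given an initialized security manager it takes no arguments:
    #
    #     security_manager.sync_roles()
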
def sync_resource_permissions(self, perms: Iterable[tuple[str, str]] | None = None) -> None:
"""Populates resource-based permissions."""
if not perms:
return
for action_name, resource_name in perms:
self.create_resource(resource_name)
self.create_permission(action_name, resource_name)
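
    # Usage sketch (editor's addition, hypothetical resource names): `perms` is
    # any iterable of (action_name, resource_name) pairs, e.g.
    #
    #     security_manager.sync_resource_permissions(
    #         [("can_read", "Connections"), ("can_edit", "Pools")]
    #     )
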
def sync_perm_for_dag(
self,
dag_id: str,
access_control: dict[str, Collection[str]] | None = None,
) -> None:
"""
        Sync permissions for given dag id.

        The dag id is assumed to exist in our dag bag, as only the refresh
        button or DagBag will call this function.

        :param dag_id: the ID of the DAG whose permissions should be updated
        :param access_control: a dict where each key is a rolename and
            each value is a set() of action names (e.g. {'can_read'})
        :return: None.
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)
for dag_action_name in self.DAG_ACTIONS:
self.create_permission(dag_action_name, dag_resource_name)
if access_control is not None:
self.log.info("Syncing DAG-level permissions for DAG '%s'", dag_resource_name)
self._sync_dag_view_permissions(dag_resource_name, access_control)
else:
self.log.info(
"Not syncing DAG-level permissions for DAG '%s' as access control is unset.",
dag_resource_name,
)
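
    # Usage sketch (editor's addition, hypothetical role name): grant a custom
    # role read-only access to a single DAG; roles omitted from the mapping have
    # their stale per-DAG permissions revoked by the sync below:
    #
    #     security_manager.sync_perm_for_dag(
    #         "example_dag",
    #         access_control={"data-team": {"can_read"}},
    #     )
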
def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:
"""
        Set the access policy on the given DAG's ViewModel.

        :param dag_id: the ID of the DAG whose permissions should be updated
        :param access_control: a dict where each key is a rolename and
            each value is a set() of action names (e.g. {'can_read'})
"""
dag_resource_name = permissions.resource_name_for_dag(dag_id)

        def _get_or_create_dag_permission(action_name: str) -> Permission | None:
perm = self.get_permission(action_name, dag_resource_name)
if not perm:
self.log.info("Creating new action '%s' on resource '%s'", action_name, dag_resource_name)
perm = self.create_permission(action_name, dag_resource_name)
return perm

        def _revoke_stale_permissions(resource: Resource):
existing_dag_perms = self.get_resource_permissions(resource)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role if role.name != "Admin"]
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, ())
if perm.action.name not in target_perms_for_role:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.action,
dag_resource_name,
role.name,
)
self.remove_permission_from_role(role, perm)
resource = self.get_resource(dag_resource_name)
if resource:
_revoke_stale_permissions(resource)
for rolename, action_names in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
f"The access_control mapping for DAG '{dag_id}' includes a role named "
f"'{rolename}', but that role does not exist"
)
action_names = set(action_names)
invalid_action_names = action_names - self.DAG_ACTIONS
if invalid_action_names:
raise AirflowException(
f"The access_control map for DAG '{dag_resource_name}' includes "
f"the following invalid permissions: {invalid_action_names}; "
f"The set of valid permissions is: {self.DAG_ACTIONS}"
)
for action_name in action_names:
dag_perm = _get_or_create_dag_permission(action_name)
if dag_perm:
self.add_permission_to_role(role, dag_perm)
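
    # Behavior note (editor's addition, hypothetical values): given
    # access_control={"viewer-team": {"can_read"}}, a previously granted
    # can_edit permission on this DAG's resource is revoked from "viewer-team"
    # by _revoke_stale_permissions, while the "Admin" role is always left
    # untouched.
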
def create_perm_vm_for_all_dag(self) -> None:
"""Create perm-vm if not exist and insert into FAB security model for all-dags."""
# create perm for global logical dag
for resource_name in self.DAG_RESOURCES:
for action_name in self.DAG_ACTIONS:
self._merge_perm(action_name, resource_name)

    def check_authorization(
self,
perms: Sequence[tuple[str, str]] | None = None,
dag_id: str | None = None,
) -> bool:
"""Checks that the logged in user has the specified permissions."""
if not perms:
return True
for perm in perms:
if perm in (
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
):
can_access_all_dags = self.has_access(*perm)
if can_access_all_dags:
continue
action = perm[0]
if self.can_access_some_dags(action, dag_id):
continue
return False
elif not self.has_access(*perm):
return False
return True
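
    # Usage sketch (editor's addition): callers typically pass the required
    # permissions as (action, resource) pairs, e.g.
    #
    #     security_manager.check_authorization(
    #         perms=[(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)],
    #         dag_id="example_dag",
    #     )
    #
    # which succeeds if the user can read all DAGs or "example_dag" itself.

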
class FakeAppBuilder:
"""Stand-in class to replace a Flask App Builder.
The only purpose is to provide the ``self.appbuilder.get_session`` interface
for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask
app, which is slow to create.
"""

    def __init__(self, session: Session | None = None) -> None:
self.get_session = session


class ApplessAirflowSecurityManager(AirflowSecurityManager):
"""Security Manager that doesn't need the whole flask app."""

    def __init__(self, session: Session | None = None):
self.appbuilder = FakeAppBuilder(session)
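
# Usage sketch (editor's addition): the "appless" variant needs only a
# SQLAlchemy session, which makes it handy in tests or standalone scripts where
# creating a full Flask app would be too slow. Assuming a configured Airflow
# environment:
#
#     from airflow.settings import Session
#
#     session = Session()
#     sm = ApplessAirflowSecurityManager(session=session)
#     all_perms = sm.get_all_permissions()
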
|
flexible
|
{
"blob_id": "47cee0c659976a2b74e2bb07f6c4d622ceab7362",
"index": 3866,
"step-1": "<mask token>\n\n\nclass AirflowSecurityManager(SecurityManagerOverride, SecurityManager,\n LoggingMixin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},\n {'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {\n 'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +\n OP_PERMISSIONS + ADMIN_PERMISSIONS}]\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _get_root_dag_id(self, dag_id: str) ->str:\n if '.' in dag_id:\n dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,\n DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()\n return dm.root_dag_id or dm.dag_id\n return dag_id\n <mask token>\n <mask token>\n\n @staticmethod\n def get_user_roles(user=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if user is None:\n user = g.user\n return user.roles\n\n def get_readable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs readable by authenticated user.\"\"\"\n warnings.warn(\n '`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user\n )\n\n def get_editable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs editable by authenticated user.\"\"\"\n warnings.warn(\n '`get_editable_dags` has been deprecated. 
Please use `get_editable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user\n )\n <mask token>\n\n def get_readable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs readable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])\n\n def get_editable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs editable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])\n\n @provide_session\n def get_accessible_dag_ids(self, user, user_actions: (Container[str] |\n None)=None, session: Session=NEW_SESSION) ->set[str]:\n \"\"\"Generic function to get readable or writable DAGs for user.\"\"\"\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.\n ACTION_CAN_READ]\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.\n can_edit_all_dags(user) or permissions.ACTION_CAN_READ in\n user_actions and self.can_read_all_dags(user)):\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n user_query = session.scalar(select(User).options(joinedload(\n User.roles).subqueryload(Role.permissions).options(\n joinedload(Permission.action), joinedload(Permission.\n resource))).where(User.id == user.id))\n roles = user_query.roles\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.\n RESOURCE_DAG_PREFIX):])\n else:\n resources.add(resource)\n return {dag.dag_id for dag in session.execute(select(DagModel.\n dag_id).where(DagModel.dag_id.in_(resources)))}\n\n def can_access_some_dags(self, action: str, dag_id: (str | None)=None\n ) ->bool:\n \"\"\"Checks if user has read or write access to some dags.\"\"\"\n if dag_id and dag_id != '~':\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.\n resource_name_for_dag(root_dag_id))\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))\n\n def can_read_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG read access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ,\n dag_resource_name, user=user)\n <mask token>\n <mask token>\n\n def prefixed_dag_id(self, dag_id: str) ->str:\n \"\"\"Returns the permission name for a DAG id.\"\"\"\n warnings.warn(\n '`prefixed_dag_id` has been deprecated. 
Please use `airflow.security.permissions.resource_name_for_dag` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n root_dag_id = self._get_root_dag_id(dag_id)\n return permissions.resource_name_for_dag(root_dag_id)\n\n def is_dag_resource(self, resource_name: str) ->bool:\n \"\"\"Determines if a resource belongs to a DAG or all DAGs.\"\"\"\n if resource_name == permissions.RESOURCE_DAG:\n return True\n return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)\n\n def has_access(self, action_name: str, resource_name: str, user=None\n ) ->bool:\n \"\"\"\n Verify whether a given user could perform a certain action on the given resource.\n\n Example actions might include can_read, can_write, can_delete, etc.\n\n :param action_name: action_name on resource (e.g can_read, can_edit).\n :param resource_name: name of view-menu or resource.\n :param user: user name\n :return: Whether user could perform certain action on the resource.\n :rtype bool\n \"\"\"\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n return False\n\n def _has_role(self, role_name_or_list: Container, user) ->bool:\n \"\"\"Whether the user has this role name.\"\"\"\n if not isinstance(role_name_or_list, list):\n role_name_or_list = [role_name_or_list]\n return any(r.name in role_name_or_list for r in user.roles)\n\n def has_all_dags_access(self, user) ->bool:\n \"\"\"\n Has all the dag access in any of the 3 cases.\n\n 1. Role needs to be in (Admin, Viewer, User, Op).\n 2. Has can_read action on dags resource.\n 3. Has can_edit action on dags resource.\n \"\"\"\n if not user:\n user = g.user\n return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user\n ) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)\n\n def can_edit_all_dags(self, user=None) ->bool:\n \"\"\"Has can_edit action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG, user)\n <mask token>\n\n def clean_perms(self) ->None:\n \"\"\"FAB leaves faulty permissions that need to be cleaned up.\"\"\"\n self.log.debug('Cleaning faulty perms')\n sesh = self.appbuilder.get_session\n perms = sesh.query(Permission).filter(or_(Permission.action == None,\n Permission.resource == None))\n deleted_count = 0\n for perm in perms:\n sesh.delete(perm)\n deleted_count += 1\n sesh.commit()\n if deleted_count:\n self.log.info('Deleted %s faulty permissions', deleted_count)\n <mask token>\n\n def add_homepage_access_to_custom_roles(self) ->None:\n \"\"\"\n Add Website.can_read access to all custom roles.\n\n :return: None.\n \"\"\"\n website_permission = self.create_permission(permissions.\n ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)\n custom_roles = [role for role in self.get_all_roles() if role.name\n not in EXISTING_ROLES]\n for role in custom_roles:\n self.add_permission_to_role(role, website_permission)\n self.appbuilder.get_session.commit()\n\n def get_all_permissions(self) ->set[tuple[str, str]]:\n \"\"\"Returns all permissions as a set of tuples with the action and resource names.\"\"\"\n return set(self.appbuilder.get_session.execute(select(self.\n action_model.name, self.resource_model.name).join(self.\n permission_model.action).join(self.permission_model.resource)))\n <mask token>\n <mask token>\n\n def create_dag_specific_permissions(self) ->None:\n \"\"\"\n Add permissions to all 
DAGs.\n\n Creates 'can_read', 'can_edit', and 'can_delete' permissions for all\n DAGs, along with any `access_control` permissions provided in them.\n\n This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`\n if you only need to sync a single DAG.\n\n :return: None.\n \"\"\"\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n for dag in dags:\n root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag\n .dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)\n\n def update_admin_permission(self) ->None:\n \"\"\"\n Add missing permissions to the table for admin.\n\n Admin should get all the permissions, except the dag permissions\n because Admin already has Dags permission.\n Add the missing ones to the table for admin.\n\n :return: None.\n \"\"\"\n session = self.appbuilder.get_session\n dag_resources = session.scalars(select(Resource).where(Resource.\n name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))\n resource_ids = [resource.id for resource in dag_resources]\n perms = session.scalars(select(Permission).where(~Permission.\n resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n admin = self.find_role('Admin')\n admin.permissions = list(set(admin.permissions) | set(perms))\n session.commit()\n\n def sync_roles(self) ->None:\n \"\"\"\n Initialize default and custom roles with related permissions.\n\n 1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n 2. 
Init the custom role(dag-user) with related permissions.\n\n :return: None.\n \"\"\"\n self.create_perm_vm_for_all_dag()\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n self.add_homepage_access_to_custom_roles()\n self.update_admin_permission()\n self.clean_perms()\n\n def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |\n None)=None) ->None:\n \"\"\"Populates resource-based permissions.\"\"\"\n if not perms:\n return\n for action_name, resource_name in perms:\n self.create_resource(resource_name)\n self.create_permission(action_name, resource_name)\n\n def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,\n Collection[str]] | None)=None) ->None:\n \"\"\"\n Sync permissions for given dag id.\n\n The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g.,\n {'can_read'}\n :return:\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\",\n dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\"\n , dag_resource_name)\n\n def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[\n str, Collection[str]]) ->None:\n \"\"\"\n Set the access policy on the given DAG's ViewModel.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. 
{'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) ->(Permission |\n None):\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\",\n action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name !=\n 'Admin']\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\"Revoking '%s' on DAG '%s' for role '%s'\"\n , perm.action, dag_resource_name, role.name)\n self.remove_permission_from_role(role, perm)\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist\"\n )\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) ->None:\n \"\"\"Create perm-vm if not exist and insert into FAB security model for all-dags.\"\"\"\n for resource_name in self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)\n =None, dag_id: (str | None)=None) ->bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n for perm in perms:\n if perm in ((permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG)):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n elif not self.has_access(*perm):\n return False\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: (Session | None)=None) ->None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, session: (Session | None)=None):\n self.appbuilder = FakeAppBuilder(session)\n",
"step-2": "<mask token>\n\n\nclass AirflowSecurityManager(SecurityManagerOverride, SecurityManager,\n LoggingMixin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},\n {'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {\n 'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +\n OP_PERMISSIONS + ADMIN_PERMISSIONS}]\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _get_root_dag_id(self, dag_id: str) ->str:\n if '.' in dag_id:\n dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,\n DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()\n return dm.root_dag_id or dm.dag_id\n return dag_id\n <mask token>\n <mask token>\n\n @staticmethod\n def get_user_roles(user=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if user is None:\n user = g.user\n return user.roles\n\n def get_readable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs readable by authenticated user.\"\"\"\n warnings.warn(\n '`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user\n )\n\n def get_editable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs editable by authenticated user.\"\"\"\n warnings.warn(\n '`get_editable_dags` has been deprecated. 
Please use `get_editable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user\n )\n <mask token>\n\n def get_readable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs readable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])\n\n def get_editable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs editable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])\n\n @provide_session\n def get_accessible_dag_ids(self, user, user_actions: (Container[str] |\n None)=None, session: Session=NEW_SESSION) ->set[str]:\n \"\"\"Generic function to get readable or writable DAGs for user.\"\"\"\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.\n ACTION_CAN_READ]\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.\n can_edit_all_dags(user) or permissions.ACTION_CAN_READ in\n user_actions and self.can_read_all_dags(user)):\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n user_query = session.scalar(select(User).options(joinedload(\n User.roles).subqueryload(Role.permissions).options(\n joinedload(Permission.action), joinedload(Permission.\n resource))).where(User.id == user.id))\n roles = user_query.roles\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.\n RESOURCE_DAG_PREFIX):])\n else:\n resources.add(resource)\n return {dag.dag_id for dag in session.execute(select(DagModel.\n dag_id).where(DagModel.dag_id.in_(resources)))}\n\n def can_access_some_dags(self, action: str, dag_id: (str | None)=None\n ) ->bool:\n \"\"\"Checks if user has read or write access to some dags.\"\"\"\n if dag_id and dag_id != '~':\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.\n resource_name_for_dag(root_dag_id))\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))\n\n def can_read_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG read access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ,\n dag_resource_name, user=user)\n <mask token>\n\n def can_delete_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG delete access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_DELETE,\n dag_resource_name, user=user)\n\n def prefixed_dag_id(self, dag_id: str) ->str:\n \"\"\"Returns the permission name for a DAG id.\"\"\"\n warnings.warn(\n '`prefixed_dag_id` has been deprecated. 
Please use `airflow.security.permissions.resource_name_for_dag` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n root_dag_id = self._get_root_dag_id(dag_id)\n return permissions.resource_name_for_dag(root_dag_id)\n\n def is_dag_resource(self, resource_name: str) ->bool:\n \"\"\"Determines if a resource belongs to a DAG or all DAGs.\"\"\"\n if resource_name == permissions.RESOURCE_DAG:\n return True\n return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)\n\n def has_access(self, action_name: str, resource_name: str, user=None\n ) ->bool:\n \"\"\"\n Verify whether a given user could perform a certain action on the given resource.\n\n Example actions might include can_read, can_write, can_delete, etc.\n\n :param action_name: action_name on resource (e.g can_read, can_edit).\n :param resource_name: name of view-menu or resource.\n :param user: user name\n :return: Whether user could perform certain action on the resource.\n :rtype bool\n \"\"\"\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n return False\n\n def _has_role(self, role_name_or_list: Container, user) ->bool:\n \"\"\"Whether the user has this role name.\"\"\"\n if not isinstance(role_name_or_list, list):\n role_name_or_list = [role_name_or_list]\n return any(r.name in role_name_or_list for r in user.roles)\n\n def has_all_dags_access(self, user) ->bool:\n \"\"\"\n Has all the dag access in any of the 3 cases.\n\n 1. Role needs to be in (Admin, Viewer, User, Op).\n 2. Has can_read action on dags resource.\n 3. Has can_edit action on dags resource.\n \"\"\"\n if not user:\n user = g.user\n return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user\n ) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)\n\n def can_edit_all_dags(self, user=None) ->bool:\n \"\"\"Has can_edit action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG, user)\n\n def can_read_all_dags(self, user=None) ->bool:\n \"\"\"Has can_read action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG, user)\n\n def clean_perms(self) ->None:\n \"\"\"FAB leaves faulty permissions that need to be cleaned up.\"\"\"\n self.log.debug('Cleaning faulty perms')\n sesh = self.appbuilder.get_session\n perms = sesh.query(Permission).filter(or_(Permission.action == None,\n Permission.resource == None))\n deleted_count = 0\n for perm in perms:\n sesh.delete(perm)\n deleted_count += 1\n sesh.commit()\n if deleted_count:\n self.log.info('Deleted %s faulty permissions', deleted_count)\n <mask token>\n\n def add_homepage_access_to_custom_roles(self) ->None:\n \"\"\"\n Add Website.can_read access to all custom roles.\n\n :return: None.\n \"\"\"\n website_permission = self.create_permission(permissions.\n ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)\n custom_roles = [role for role in self.get_all_roles() if role.name\n not in EXISTING_ROLES]\n for role in custom_roles:\n self.add_permission_to_role(role, website_permission)\n self.appbuilder.get_session.commit()\n\n def get_all_permissions(self) ->set[tuple[str, str]]:\n \"\"\"Returns all permissions as a set of tuples with the action and resource names.\"\"\"\n return set(self.appbuilder.get_session.execute(select(self.\n action_model.name, self.resource_model.name).join(self.\n 
permission_model.action).join(self.permission_model.resource)))\n <mask token>\n\n def _get_all_roles_with_permissions(self) ->dict[str, Role]:\n \"\"\"Returns a dict with a key of role name and value of role with early loaded permissions.\"\"\"\n return {r.name: r for r in self.appbuilder.get_session.scalars(\n select(self.role_model).options(joinedload(self.role_model.\n permissions))).unique()}\n\n def create_dag_specific_permissions(self) ->None:\n \"\"\"\n Add permissions to all DAGs.\n\n Creates 'can_read', 'can_edit', and 'can_delete' permissions for all\n DAGs, along with any `access_control` permissions provided in them.\n\n This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`\n if you only need to sync a single DAG.\n\n :return: None.\n \"\"\"\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n for dag in dags:\n root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag\n .dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)\n\n def update_admin_permission(self) ->None:\n \"\"\"\n Add missing permissions to the table for admin.\n\n Admin should get all the permissions, except the dag permissions\n because Admin already has Dags permission.\n Add the missing ones to the table for admin.\n\n :return: None.\n \"\"\"\n session = self.appbuilder.get_session\n dag_resources = session.scalars(select(Resource).where(Resource.\n name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))\n resource_ids = [resource.id for resource in dag_resources]\n perms = session.scalars(select(Permission).where(~Permission.\n resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n admin = self.find_role('Admin')\n admin.permissions = list(set(admin.permissions) | set(perms))\n session.commit()\n\n def sync_roles(self) ->None:\n \"\"\"\n Initialize default and custom roles with related permissions.\n\n 1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n 2. 
Init the custom role(dag-user) with related permissions.\n\n :return: None.\n \"\"\"\n self.create_perm_vm_for_all_dag()\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n self.add_homepage_access_to_custom_roles()\n self.update_admin_permission()\n self.clean_perms()\n\n def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |\n None)=None) ->None:\n \"\"\"Populates resource-based permissions.\"\"\"\n if not perms:\n return\n for action_name, resource_name in perms:\n self.create_resource(resource_name)\n self.create_permission(action_name, resource_name)\n\n def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,\n Collection[str]] | None)=None) ->None:\n \"\"\"\n Sync permissions for given dag id.\n\n The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g.,\n {'can_read'}\n :return:\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\",\n dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\"\n , dag_resource_name)\n\n def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[\n str, Collection[str]]) ->None:\n \"\"\"\n Set the access policy on the given DAG's ViewModel.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. 
{'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) ->(Permission |\n None):\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\",\n action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name !=\n 'Admin']\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\"Revoking '%s' on DAG '%s' for role '%s'\"\n , perm.action, dag_resource_name, role.name)\n self.remove_permission_from_role(role, perm)\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist\"\n )\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) ->None:\n \"\"\"Create perm-vm if not exist and insert into FAB security model for all-dags.\"\"\"\n for resource_name in self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)\n =None, dag_id: (str | None)=None) ->bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n for perm in perms:\n if perm in ((permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG)):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n elif not self.has_access(*perm):\n return False\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: (Session | None)=None) ->None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, session: (Session | None)=None):\n self.appbuilder = FakeAppBuilder(session)\n",
"step-3": "<mask token>\n\n\nclass AirflowSecurityManager(SecurityManagerOverride, SecurityManager,\n LoggingMixin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},\n {'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {\n 'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +\n OP_PERMISSIONS + ADMIN_PERMISSIONS}]\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, appbuilder) ->None:\n super().__init__(appbuilder=appbuilder, actionmodelview=self.\n actionmodelview, authdbview=self.authdbview, authldapview=self.\n authldapview, authoauthview=self.authoauthview, authoidview=\n self.authoidview, authremoteuserview=self.authremoteuserview,\n permissionmodelview=self.permissionmodelview, registeruser_view\n =self.registeruser_view, registeruserdbview=self.\n registeruserdbview, registeruseroauthview=self.\n registeruseroauthview, registerusermodelview=self.\n registerusermodelview, registeruseroidview=self.\n registeruseroidview, resetmypasswordview=self.\n resetmypasswordview, resetpasswordview=self.resetpasswordview,\n rolemodelview=self.rolemodelview, user_model=self.user_model,\n userinfoeditview=self.userinfoeditview, userdbmodelview=self.\n userdbmodelview, userldapmodelview=self.userldapmodelview,\n useroauthmodelview=self.useroauthmodelview, useroidmodelview=\n self.useroidmodelview, userremoteusermodelview=self.\n userremoteusermodelview, userstatschartview=self.userstatschartview\n )\n for attr in dir(self):\n if not attr.endswith('view'):\n continue\n view = getattr(self, attr, None)\n if not view or not getattr(view, 'datamodel', None):\n continue\n view.datamodel = CustomSQLAInterface(view.datamodel.obj)\n self.perms = None\n\n def _get_root_dag_id(self, dag_id: str) ->str:\n if '.' in dag_id:\n dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,\n DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()\n return dm.root_dag_id or dm.dag_id\n return dag_id\n <mask token>\n\n def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) ->None:\n \"\"\"Sync the provided roles and permissions.\"\"\"\n existing_roles = self._get_all_roles_with_permissions()\n non_dag_perms = self._get_all_non_dag_permissions()\n for config in roles:\n role_name = config['role']\n perms = config['perms']\n role = existing_roles.get(role_name) or self.add_role(role_name)\n for action_name, resource_name in perms:\n perm = non_dag_perms.get((action_name, resource_name)\n ) or self.create_permission(action_name, resource_name)\n if perm not in role.permissions:\n self.add_permission_to_role(role, perm)\n\n @staticmethod\n def get_user_roles(user=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if user is None:\n user = g.user\n return user.roles\n\n def get_readable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs readable by authenticated user.\"\"\"\n warnings.warn(\n '`get_readable_dags` has been deprecated. 
Please use `get_readable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user\n )\n\n def get_editable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs editable by authenticated user.\"\"\"\n warnings.warn(\n '`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user\n )\n <mask token>\n\n def get_readable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs readable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])\n\n def get_editable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs editable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])\n\n @provide_session\n def get_accessible_dag_ids(self, user, user_actions: (Container[str] |\n None)=None, session: Session=NEW_SESSION) ->set[str]:\n \"\"\"Generic function to get readable or writable DAGs for user.\"\"\"\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.\n ACTION_CAN_READ]\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.\n can_edit_all_dags(user) or permissions.ACTION_CAN_READ in\n user_actions and self.can_read_all_dags(user)):\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n user_query = session.scalar(select(User).options(joinedload(\n User.roles).subqueryload(Role.permissions).options(\n joinedload(Permission.action), joinedload(Permission.\n resource))).where(User.id == user.id))\n roles = user_query.roles\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.\n RESOURCE_DAG_PREFIX):])\n else:\n resources.add(resource)\n return {dag.dag_id for dag in session.execute(select(DagModel.\n dag_id).where(DagModel.dag_id.in_(resources)))}\n\n def can_access_some_dags(self, action: str, dag_id: (str | None)=None\n ) ->bool:\n \"\"\"Checks if user has read or write access to some dags.\"\"\"\n if dag_id and dag_id != '~':\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.\n resource_name_for_dag(root_dag_id))\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))\n\n def can_read_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG read access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ,\n dag_resource_name, user=user)\n <mask token>\n\n def can_delete_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG delete access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n 
dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_DELETE,\n dag_resource_name, user=user)\n\n def prefixed_dag_id(self, dag_id: str) ->str:\n \"\"\"Returns the permission name for a DAG id.\"\"\"\n warnings.warn(\n '`prefixed_dag_id` has been deprecated. Please use `airflow.security.permissions.resource_name_for_dag` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n root_dag_id = self._get_root_dag_id(dag_id)\n return permissions.resource_name_for_dag(root_dag_id)\n\n def is_dag_resource(self, resource_name: str) ->bool:\n \"\"\"Determines if a resource belongs to a DAG or all DAGs.\"\"\"\n if resource_name == permissions.RESOURCE_DAG:\n return True\n return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)\n\n def has_access(self, action_name: str, resource_name: str, user=None\n ) ->bool:\n \"\"\"\n Verify whether a given user could perform a certain action on the given resource.\n\n Example actions might include can_read, can_write, can_delete, etc.\n\n :param action_name: action_name on resource (e.g can_read, can_edit).\n :param resource_name: name of view-menu or resource.\n :param user: user name\n :return: Whether user could perform certain action on the resource.\n :rtype bool\n \"\"\"\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n return False\n\n def _has_role(self, role_name_or_list: Container, user) ->bool:\n \"\"\"Whether the user has this role name.\"\"\"\n if not isinstance(role_name_or_list, list):\n role_name_or_list = [role_name_or_list]\n return any(r.name in role_name_or_list for r in user.roles)\n\n def has_all_dags_access(self, user) ->bool:\n \"\"\"\n Has all the dag access in any of the 3 cases.\n\n 1. Role needs to be in (Admin, Viewer, User, Op).\n 2. Has can_read action on dags resource.\n 3. 
Has can_edit action on dags resource.\n \"\"\"\n if not user:\n user = g.user\n return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user\n ) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)\n\n def can_edit_all_dags(self, user=None) ->bool:\n \"\"\"Has can_edit action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG, user)\n\n def can_read_all_dags(self, user=None) ->bool:\n \"\"\"Has can_read action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG, user)\n\n def clean_perms(self) ->None:\n \"\"\"FAB leaves faulty permissions that need to be cleaned up.\"\"\"\n self.log.debug('Cleaning faulty perms')\n sesh = self.appbuilder.get_session\n perms = sesh.query(Permission).filter(or_(Permission.action == None,\n Permission.resource == None))\n deleted_count = 0\n for perm in perms:\n sesh.delete(perm)\n deleted_count += 1\n sesh.commit()\n if deleted_count:\n self.log.info('Deleted %s faulty permissions', deleted_count)\n\n def _merge_perm(self, action_name: str, resource_name: str) ->None:\n \"\"\"\n Add the new (action, resource) to assoc_permission_role if it doesn't exist.\n\n It will add the related entry to ab_permission and ab_resource two meta tables as well.\n\n :param action_name: Name of the action\n :param resource_name: Name of the resource\n :return:\n \"\"\"\n action = self.get_action(action_name)\n resource = self.get_resource(resource_name)\n perm = None\n if action and resource:\n perm = self.appbuilder.get_session.scalar(select(self.\n permission_model).filter_by(action=action, resource=\n resource).limit(1))\n if not perm and action_name and resource_name:\n self.create_permission(action_name, resource_name)\n\n def add_homepage_access_to_custom_roles(self) ->None:\n \"\"\"\n Add Website.can_read access to all custom roles.\n\n :return: None.\n \"\"\"\n website_permission = self.create_permission(permissions.\n ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)\n custom_roles = [role for role in self.get_all_roles() if role.name\n not in EXISTING_ROLES]\n for role in custom_roles:\n self.add_permission_to_role(role, website_permission)\n self.appbuilder.get_session.commit()\n\n def get_all_permissions(self) ->set[tuple[str, str]]:\n \"\"\"Returns all permissions as a set of tuples with the action and resource names.\"\"\"\n return set(self.appbuilder.get_session.execute(select(self.\n action_model.name, self.resource_model.name).join(self.\n permission_model.action).join(self.permission_model.resource)))\n\n def _get_all_non_dag_permissions(self) ->dict[tuple[str, str], Permission]:\n \"\"\"\n Get permissions except those that are for specific DAGs.\n\n Returns a dict with a key of (action_name, resource_name) and value of permission\n with all permissions except those that are for specific DAGs.\n \"\"\"\n return {(action_name, resource_name): viewmodel for action_name,\n resource_name, viewmodel in self.appbuilder.get_session.execute\n (select(self.action_model.name, self.resource_model.name, self.\n permission_model).join(self.permission_model.action).join(self.\n permission_model.resource).where(~self.resource_model.name.like\n (f'{permissions.RESOURCE_DAG_PREFIX}%')))}\n\n def _get_all_roles_with_permissions(self) ->dict[str, Role]:\n \"\"\"Returns a dict with a key of role name and value of role with early loaded permissions.\"\"\"\n return {r.name: r for r in self.appbuilder.get_session.scalars(\n 
select(self.role_model).options(joinedload(self.role_model.\n permissions))).unique()}\n\n def create_dag_specific_permissions(self) ->None:\n \"\"\"\n Add permissions to all DAGs.\n\n Creates 'can_read', 'can_edit', and 'can_delete' permissions for all\n DAGs, along with any `access_control` permissions provided in them.\n\n This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`\n if you only need to sync a single DAG.\n\n :return: None.\n \"\"\"\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n for dag in dags:\n root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag\n .dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)\n\n def update_admin_permission(self) ->None:\n \"\"\"\n Add missing permissions to the table for admin.\n\n Admin should get all the permissions, except the dag permissions\n because Admin already has Dags permission.\n Add the missing ones to the table for admin.\n\n :return: None.\n \"\"\"\n session = self.appbuilder.get_session\n dag_resources = session.scalars(select(Resource).where(Resource.\n name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))\n resource_ids = [resource.id for resource in dag_resources]\n perms = session.scalars(select(Permission).where(~Permission.\n resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n admin = self.find_role('Admin')\n admin.permissions = list(set(admin.permissions) | set(perms))\n session.commit()\n\n def sync_roles(self) ->None:\n \"\"\"\n Initialize default and custom roles with related permissions.\n\n 1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n 2. 
Init the custom role(dag-user) with related permissions.\n\n :return: None.\n \"\"\"\n self.create_perm_vm_for_all_dag()\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n self.add_homepage_access_to_custom_roles()\n self.update_admin_permission()\n self.clean_perms()\n\n def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |\n None)=None) ->None:\n \"\"\"Populates resource-based permissions.\"\"\"\n if not perms:\n return\n for action_name, resource_name in perms:\n self.create_resource(resource_name)\n self.create_permission(action_name, resource_name)\n\n def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,\n Collection[str]] | None)=None) ->None:\n \"\"\"\n Sync permissions for given dag id.\n\n The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g.,\n {'can_read'}\n :return:\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\",\n dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\"\n , dag_resource_name)\n\n def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[\n str, Collection[str]]) ->None:\n \"\"\"\n Set the access policy on the given DAG's ViewModel.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. 
{'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) ->(Permission |\n None):\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\",\n action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name !=\n 'Admin']\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\"Revoking '%s' on DAG '%s' for role '%s'\"\n , perm.action, dag_resource_name, role.name)\n self.remove_permission_from_role(role, perm)\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist\"\n )\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) ->None:\n \"\"\"Create perm-vm if not exist and insert into FAB security model for all-dags.\"\"\"\n for resource_name in self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)\n =None, dag_id: (str | None)=None) ->bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n for perm in perms:\n if perm in ((permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG)):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n elif not self.has_access(*perm):\n return False\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: (Session | None)=None) ->None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, session: (Session | None)=None):\n self.appbuilder = FakeAppBuilder(session)\n",
"step-4": "<mask token>\nEXISTING_ROLES = {'Admin', 'Viewer', 'User', 'Op', 'Public'}\nif TYPE_CHECKING:\n from sqlalchemy.orm import Session\n SecurityManagerOverride: type = object\nelse:\n SecurityManagerOverride = get_auth_manager(\n ).get_security_manager_override_class()\n\n\nclass AirflowSecurityManager(SecurityManagerOverride, SecurityManager,\n LoggingMixin):\n \"\"\"Custom security manager, which introduces a permission model adapted to Airflow.\"\"\"\n VIEWER_PERMISSIONS = [(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_AUDIT_LOG), (permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG_DEPENDENCIES), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_DAG_CODE), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_DATASET), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_CLUSTER_ACTIVITY), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_DAG_WARNING), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_JOB), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD), (permissions.\n ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE), (permissions.\n ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_PLUGIN), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_XCOM), (permissions.\n ACTION_CAN_READ, permissions.RESOURCE_WEBSITE), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.\n RESOURCE_DAG_DEPENDENCIES), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_DATASET), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_CLUSTER_ACTIVITY), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.\n RESOURCE_TASK_INSTANCE)]\n USER_PERMISSIONS = [(permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_CREATE, permissions.\n RESOURCE_TASK_INSTANCE), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_TASK_INSTANCE), (permissions.ACTION_CAN_DELETE,\n permissions.RESOURCE_TASK_INSTANCE), (permissions.ACTION_CAN_CREATE,\n permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_EDIT,\n permissions.RESOURCE_DAG_RUN), (permissions.ACTION_CAN_DELETE,\n permissions.RESOURCE_DAG_RUN)]\n OP_PERMISSIONS = [(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_CONFIG), (permissions.ACTION_CAN_ACCESS_MENU, permissions.\n RESOURCE_ADMIN_MENU), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_CONFIG), 
(permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_CONNECTION), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL), (permissions.\n ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM), (\n permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION), (\n permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION), (\n permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION), (\n permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION), (\n permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL), (\n permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL), (\n permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL), (\n permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL), (\n permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER), (\n permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE), (\n permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM)]\n ADMIN_PERMISSIONS = [(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_TASK_RESCHEDULE), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_TASK_RESCHEDULE), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_TRIGGER), (permissions.ACTION_CAN_ACCESS_MENU,\n permissions.RESOURCE_TRIGGER), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_PASSWORD), (permissions.ACTION_CAN_EDIT,\n permissions.RESOURCE_PASSWORD), (permissions.ACTION_CAN_READ,\n permissions.RESOURCE_ROLE), (permissions.ACTION_CAN_EDIT,\n permissions.RESOURCE_ROLE)]\n DAG_RESOURCES = {permissions.RESOURCE_DAG}\n DAG_ACTIONS = permissions.DAG_ACTIONS\n ROLE_CONFIGS: list[dict[str, Any]] = [{'role': 'Public', 'perms': []},\n {'role': 'Viewer', 'perms': VIEWER_PERMISSIONS}, {'role': 'User',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS}, {'role': 'Op',\n 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS}, {\n 'role': 'Admin', 'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS +\n OP_PERMISSIONS + ADMIN_PERMISSIONS}]\n actionmodelview = ActionModelView\n permissionmodelview = PermissionPairModelView\n rolemodelview = CustomRoleModelView\n resourcemodelview = ResourceModelView\n userdbmodelview = CustomUserDBModelView\n resetmypasswordview = CustomResetMyPasswordView\n resetpasswordview = CustomResetPasswordView\n userinfoeditview = CustomUserInfoEditView\n userldapmodelview = CustomUserLDAPModelView\n useroauthmodelview = CustomUserOAuthModelView\n userremoteusermodelview = CustomUserRemoteUserModelView\n useroidmodelview = CustomUserOIDModelView\n userstatschartview = CustomUserStatsChartView\n\n def __init__(self, appbuilder) ->None:\n super().__init__(appbuilder=appbuilder, actionmodelview=self.\n actionmodelview, authdbview=self.authdbview, authldapview=self.\n authldapview, authoauthview=self.authoauthview, authoidview=\n self.authoidview, authremoteuserview=self.authremoteuserview,\n permissionmodelview=self.permissionmodelview, registeruser_view\n =self.registeruser_view, registeruserdbview=self.\n registeruserdbview, registeruseroauthview=self.\n registeruseroauthview, registerusermodelview=self.\n registerusermodelview, registeruseroidview=self.\n registeruseroidview, resetmypasswordview=self.\n resetmypasswordview, resetpasswordview=self.resetpasswordview,\n rolemodelview=self.rolemodelview, 
user_model=self.user_model,\n userinfoeditview=self.userinfoeditview, userdbmodelview=self.\n userdbmodelview, userldapmodelview=self.userldapmodelview,\n useroauthmodelview=self.useroauthmodelview, useroidmodelview=\n self.useroidmodelview, userremoteusermodelview=self.\n userremoteusermodelview, userstatschartview=self.userstatschartview\n )\n for attr in dir(self):\n if not attr.endswith('view'):\n continue\n view = getattr(self, attr, None)\n if not view or not getattr(view, 'datamodel', None):\n continue\n view.datamodel = CustomSQLAInterface(view.datamodel.obj)\n self.perms = None\n\n def _get_root_dag_id(self, dag_id: str) ->str:\n if '.' in dag_id:\n dm = self.appbuilder.get_session.execute(select(DagModel.dag_id,\n DagModel.root_dag_id).where(DagModel.dag_id == dag_id)).one()\n return dm.root_dag_id or dm.dag_id\n return dag_id\n\n def init_role(self, role_name, perms) ->None:\n \"\"\"\n Initialize the role with actions and related resources.\n\n :param role_name:\n :param perms:\n :return:\n \"\"\"\n warnings.warn(\n '`init_role` has been deprecated. Please use `bulk_sync_roles` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n self.bulk_sync_roles([{'role': role_name, 'perms': perms}])\n\n def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) ->None:\n \"\"\"Sync the provided roles and permissions.\"\"\"\n existing_roles = self._get_all_roles_with_permissions()\n non_dag_perms = self._get_all_non_dag_permissions()\n for config in roles:\n role_name = config['role']\n perms = config['perms']\n role = existing_roles.get(role_name) or self.add_role(role_name)\n for action_name, resource_name in perms:\n perm = non_dag_perms.get((action_name, resource_name)\n ) or self.create_permission(action_name, resource_name)\n if perm not in role.permissions:\n self.add_permission_to_role(role, perm)\n\n @staticmethod\n def get_user_roles(user=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if user is None:\n user = g.user\n return user.roles\n\n def get_readable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs readable by authenticated user.\"\"\"\n warnings.warn(\n '`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user\n )\n\n def get_editable_dags(self, user) ->Iterable[DagModel]:\n \"\"\"Gets the DAGs editable by authenticated user.\"\"\"\n warnings.warn(\n '`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user\n )\n\n @provide_session\n def get_accessible_dags(self, user_actions: (Container[str] | None),\n user, session: Session=NEW_SESSION) ->Iterable[DagModel]:\n warnings.warn(\n '`get_accessible_dags` has been deprecated. 
Please use `get_accessible_dag_ids` instead.'\n , RemovedInAirflow3Warning, stacklevel=3)\n dag_ids = self.get_accessible_dag_ids(user, user_actions, session)\n return session.scalars(select(DagModel).where(DagModel.dag_id.in_(\n dag_ids)))\n\n def get_readable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs readable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])\n\n def get_editable_dag_ids(self, user) ->set[str]:\n \"\"\"Gets the DAG IDs editable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])\n\n @provide_session\n def get_accessible_dag_ids(self, user, user_actions: (Container[str] |\n None)=None, session: Session=NEW_SESSION) ->set[str]:\n \"\"\"Generic function to get readable or writable DAGs for user.\"\"\"\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.\n ACTION_CAN_READ]\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.\n can_edit_all_dags(user) or permissions.ACTION_CAN_READ in\n user_actions and self.can_read_all_dags(user)):\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n user_query = session.scalar(select(User).options(joinedload(\n User.roles).subqueryload(Role.permissions).options(\n joinedload(Permission.action), joinedload(Permission.\n resource))).where(User.id == user.id))\n roles = user_query.roles\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(\n DagModel.dag_id))}\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.\n RESOURCE_DAG_PREFIX):])\n else:\n resources.add(resource)\n return {dag.dag_id for dag in session.execute(select(DagModel.\n dag_id).where(DagModel.dag_id.in_(resources)))}\n\n def can_access_some_dags(self, action: str, dag_id: (str | None)=None\n ) ->bool:\n \"\"\"Checks if user has read or write access to some dags.\"\"\"\n if dag_id and dag_id != '~':\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.\n resource_name_for_dag(root_dag_id))\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))\n\n def can_read_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG read access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ,\n dag_resource_name, user=user)\n\n def can_edit_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG edit access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_EDIT,\n dag_resource_name, user=user)\n\n def can_delete_dag(self, dag_id: str, user=None) ->bool:\n \"\"\"Determines whether a user has DAG delete access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_DELETE,\n dag_resource_name, user=user)\n\n def 
prefixed_dag_id(self, dag_id: str) ->str:\n \"\"\"Returns the permission name for a DAG id.\"\"\"\n warnings.warn(\n '`prefixed_dag_id` has been deprecated. Please use `airflow.security.permissions.resource_name_for_dag` instead.'\n , RemovedInAirflow3Warning, stacklevel=2)\n root_dag_id = self._get_root_dag_id(dag_id)\n return permissions.resource_name_for_dag(root_dag_id)\n\n def is_dag_resource(self, resource_name: str) ->bool:\n \"\"\"Determines if a resource belongs to a DAG or all DAGs.\"\"\"\n if resource_name == permissions.RESOURCE_DAG:\n return True\n return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)\n\n def has_access(self, action_name: str, resource_name: str, user=None\n ) ->bool:\n \"\"\"\n Verify whether a given user could perform a certain action on the given resource.\n\n Example actions might include can_read, can_write, can_delete, etc.\n\n :param action_name: action_name on resource (e.g can_read, can_edit).\n :param resource_name: name of view-menu or resource.\n :param user: user name\n :return: Whether user could perform certain action on the resource.\n :rtype bool\n \"\"\"\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n return False\n\n def _has_role(self, role_name_or_list: Container, user) ->bool:\n \"\"\"Whether the user has this role name.\"\"\"\n if not isinstance(role_name_or_list, list):\n role_name_or_list = [role_name_or_list]\n return any(r.name in role_name_or_list for r in user.roles)\n\n def has_all_dags_access(self, user) ->bool:\n \"\"\"\n Has all the dag access in any of the 3 cases.\n\n 1. Role needs to be in (Admin, Viewer, User, Op).\n 2. Has can_read action on dags resource.\n 3. 
Has can_edit action on dags resource.\n \"\"\"\n if not user:\n user = g.user\n return self._has_role(['Admin', 'Viewer', 'Op', 'User'], user\n ) or self.can_read_all_dags(user) or self.can_edit_all_dags(user)\n\n def can_edit_all_dags(self, user=None) ->bool:\n \"\"\"Has can_edit action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG, user)\n\n def can_read_all_dags(self, user=None) ->bool:\n \"\"\"Has can_read action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG, user)\n\n def clean_perms(self) ->None:\n \"\"\"FAB leaves faulty permissions that need to be cleaned up.\"\"\"\n self.log.debug('Cleaning faulty perms')\n sesh = self.appbuilder.get_session\n perms = sesh.query(Permission).filter(or_(Permission.action == None,\n Permission.resource == None))\n deleted_count = 0\n for perm in perms:\n sesh.delete(perm)\n deleted_count += 1\n sesh.commit()\n if deleted_count:\n self.log.info('Deleted %s faulty permissions', deleted_count)\n\n def _merge_perm(self, action_name: str, resource_name: str) ->None:\n \"\"\"\n Add the new (action, resource) to assoc_permission_role if it doesn't exist.\n\n It will add the related entry to ab_permission and ab_resource two meta tables as well.\n\n :param action_name: Name of the action\n :param resource_name: Name of the resource\n :return:\n \"\"\"\n action = self.get_action(action_name)\n resource = self.get_resource(resource_name)\n perm = None\n if action and resource:\n perm = self.appbuilder.get_session.scalar(select(self.\n permission_model).filter_by(action=action, resource=\n resource).limit(1))\n if not perm and action_name and resource_name:\n self.create_permission(action_name, resource_name)\n\n def add_homepage_access_to_custom_roles(self) ->None:\n \"\"\"\n Add Website.can_read access to all custom roles.\n\n :return: None.\n \"\"\"\n website_permission = self.create_permission(permissions.\n ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)\n custom_roles = [role for role in self.get_all_roles() if role.name\n not in EXISTING_ROLES]\n for role in custom_roles:\n self.add_permission_to_role(role, website_permission)\n self.appbuilder.get_session.commit()\n\n def get_all_permissions(self) ->set[tuple[str, str]]:\n \"\"\"Returns all permissions as a set of tuples with the action and resource names.\"\"\"\n return set(self.appbuilder.get_session.execute(select(self.\n action_model.name, self.resource_model.name).join(self.\n permission_model.action).join(self.permission_model.resource)))\n\n def _get_all_non_dag_permissions(self) ->dict[tuple[str, str], Permission]:\n \"\"\"\n Get permissions except those that are for specific DAGs.\n\n Returns a dict with a key of (action_name, resource_name) and value of permission\n with all permissions except those that are for specific DAGs.\n \"\"\"\n return {(action_name, resource_name): viewmodel for action_name,\n resource_name, viewmodel in self.appbuilder.get_session.execute\n (select(self.action_model.name, self.resource_model.name, self.\n permission_model).join(self.permission_model.action).join(self.\n permission_model.resource).where(~self.resource_model.name.like\n (f'{permissions.RESOURCE_DAG_PREFIX}%')))}\n\n def _get_all_roles_with_permissions(self) ->dict[str, Role]:\n \"\"\"Returns a dict with a key of role name and value of role with early loaded permissions.\"\"\"\n return {r.name: r for r in self.appbuilder.get_session.scalars(\n 
select(self.role_model).options(joinedload(self.role_model.\n permissions))).unique()}\n\n def create_dag_specific_permissions(self) ->None:\n \"\"\"\n Add permissions to all DAGs.\n\n Creates 'can_read', 'can_edit', and 'can_delete' permissions for all\n DAGs, along with any `access_control` permissions provided in them.\n\n This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`\n if you only need to sync a single DAG.\n\n :return: None.\n \"\"\"\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n for dag in dags:\n root_dag_id = (dag.parent_dag.dag_id if dag.parent_dag else dag\n .dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)\n\n def update_admin_permission(self) ->None:\n \"\"\"\n Add missing permissions to the table for admin.\n\n Admin should get all the permissions, except the dag permissions\n because Admin already has Dags permission.\n Add the missing ones to the table for admin.\n\n :return: None.\n \"\"\"\n session = self.appbuilder.get_session\n dag_resources = session.scalars(select(Resource).where(Resource.\n name.like(f'{permissions.RESOURCE_DAG_PREFIX}%')))\n resource_ids = [resource.id for resource in dag_resources]\n perms = session.scalars(select(Permission).where(~Permission.\n resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n admin = self.find_role('Admin')\n admin.permissions = list(set(admin.permissions) | set(perms))\n session.commit()\n\n def sync_roles(self) ->None:\n \"\"\"\n Initialize default and custom roles with related permissions.\n\n 1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n 2. 
Init the custom role(dag-user) with related permissions.\n\n :return: None.\n \"\"\"\n self.create_perm_vm_for_all_dag()\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n self.add_homepage_access_to_custom_roles()\n self.update_admin_permission()\n self.clean_perms()\n\n def sync_resource_permissions(self, perms: (Iterable[tuple[str, str]] |\n None)=None) ->None:\n \"\"\"Populates resource-based permissions.\"\"\"\n if not perms:\n return\n for action_name, resource_name in perms:\n self.create_resource(resource_name)\n self.create_permission(action_name, resource_name)\n\n def sync_perm_for_dag(self, dag_id: str, access_control: (dict[str,\n Collection[str]] | None)=None) ->None:\n \"\"\"\n Sync permissions for given dag id.\n\n The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g.,\n {'can_read'}\n :return:\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\",\n dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\"\n , dag_resource_name)\n\n def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[\n str, Collection[str]]) ->None:\n \"\"\"\n Set the access policy on the given DAG's ViewModel.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. 
{'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) ->(Permission |\n None):\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\",\n action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name !=\n 'Admin']\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\"Revoking '%s' on DAG '%s' for role '%s'\"\n , perm.action, dag_resource_name, role.name)\n self.remove_permission_from_role(role, perm)\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named '{rolename}', but that role does not exist\"\n )\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes the following invalid permissions: {invalid_action_names}; The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) ->None:\n \"\"\"Create perm-vm if not exist and insert into FAB security model for all-dags.\"\"\"\n for resource_name in self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(self, perms: (Sequence[tuple[str, str]] | None)\n =None, dag_id: (str | None)=None) ->bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n for perm in perms:\n if perm in ((permissions.ACTION_CAN_READ, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_EDIT, permissions.\n RESOURCE_DAG), (permissions.ACTION_CAN_DELETE, permissions.\n RESOURCE_DAG)):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n elif not self.has_access(*perm):\n return False\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: (Session | None)=None) ->None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, session: (Session | None)=None):\n self.appbuilder = FakeAppBuilder(session)\n",
"step-5": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nimport warnings\nfrom typing import TYPE_CHECKING, Any, Collection, Container, Iterable, Sequence\n\nfrom flask import g\nfrom sqlalchemy import or_, select\nfrom sqlalchemy.orm import joinedload\n\nfrom airflow.auth.managers.fab.models import Permission, Resource, Role, User\nfrom airflow.auth.managers.fab.views.permissions import (\n ActionModelView,\n PermissionPairModelView,\n ResourceModelView,\n)\nfrom airflow.auth.managers.fab.views.roles_list import CustomRoleModelView\nfrom airflow.auth.managers.fab.views.user import (\n CustomUserDBModelView,\n CustomUserLDAPModelView,\n CustomUserOAuthModelView,\n CustomUserOIDModelView,\n CustomUserRemoteUserModelView,\n)\nfrom airflow.auth.managers.fab.views.user_edit import (\n CustomResetMyPasswordView,\n CustomResetPasswordView,\n CustomUserInfoEditView,\n)\nfrom airflow.auth.managers.fab.views.user_stats import CustomUserStatsChartView\nfrom airflow.exceptions import AirflowException, RemovedInAirflow3Warning\nfrom airflow.models import DagBag, DagModel\nfrom airflow.security import permissions\nfrom airflow.utils.log.logging_mixin import LoggingMixin\nfrom airflow.utils.session import NEW_SESSION, provide_session\nfrom airflow.www.extensions.init_auth_manager import get_auth_manager\nfrom airflow.www.fab_security.sqla.manager import SecurityManager\nfrom airflow.www.utils import CustomSQLAInterface\n\nEXISTING_ROLES = {\n \"Admin\",\n \"Viewer\",\n \"User\",\n \"Op\",\n \"Public\",\n}\n\nif TYPE_CHECKING:\n from sqlalchemy.orm import Session\n\n SecurityManagerOverride: type = object\nelse:\n # Fetch the security manager override from the auth manager\n SecurityManagerOverride = get_auth_manager().get_security_manager_override_class()\n\n\nclass AirflowSecurityManager(SecurityManagerOverride, SecurityManager, LoggingMixin):\n \"\"\"Custom security manager, which introduces a permission model adapted to Airflow.\"\"\"\n\n ###########################################################################\n # PERMISSIONS\n ###########################################################################\n\n # [START security_viewer_perms]\n VIEWER_PERMISSIONS = [\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_CLUSTER_ACTIVITY),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),\n (permissions.ACTION_CAN_READ, 
permissions.RESOURCE_DAG_WARNING),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_DEPENDENCIES),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DATASET),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CLUSTER_ACTIVITY),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),\n ]\n # [END security_viewer_perms]\n\n # [START security_user_perms]\n USER_PERMISSIONS = [\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),\n ]\n # [END security_user_perms]\n\n # [START security_op_perms]\n OP_PERMISSIONS = [\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_ADMIN_MENU),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONFIG),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),\n (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_READ, 
permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM),\n ]\n # [END security_op_perms]\n\n ADMIN_PERMISSIONS = [\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_RESCHEDULE),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_RESCHEDULE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TRIGGER),\n (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TRIGGER),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_PASSWORD),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_PASSWORD),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE),\n ]\n\n # global resource for dag-level access\n DAG_RESOURCES = {permissions.RESOURCE_DAG}\n DAG_ACTIONS = permissions.DAG_ACTIONS\n\n ###########################################################################\n # DEFAULT ROLE CONFIGURATIONS\n ###########################################################################\n\n ROLE_CONFIGS: list[dict[str, Any]] = [\n {\"role\": \"Public\", \"perms\": []},\n {\"role\": \"Viewer\", \"perms\": VIEWER_PERMISSIONS},\n {\n \"role\": \"User\",\n \"perms\": VIEWER_PERMISSIONS + USER_PERMISSIONS,\n },\n {\n \"role\": \"Op\",\n \"perms\": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS,\n },\n {\n \"role\": \"Admin\",\n \"perms\": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS + ADMIN_PERMISSIONS,\n },\n ]\n\n actionmodelview = ActionModelView\n permissionmodelview = PermissionPairModelView\n rolemodelview = CustomRoleModelView\n resourcemodelview = ResourceModelView\n userdbmodelview = CustomUserDBModelView\n resetmypasswordview = CustomResetMyPasswordView\n resetpasswordview = CustomResetPasswordView\n userinfoeditview = CustomUserInfoEditView\n userldapmodelview = CustomUserLDAPModelView\n useroauthmodelview = CustomUserOAuthModelView\n userremoteusermodelview = CustomUserRemoteUserModelView\n useroidmodelview = CustomUserOIDModelView\n userstatschartview = CustomUserStatsChartView\n\n def __init__(self, appbuilder) -> None:\n super().__init__(\n appbuilder=appbuilder,\n actionmodelview=self.actionmodelview,\n authdbview=self.authdbview,\n authldapview=self.authldapview,\n authoauthview=self.authoauthview,\n authoidview=self.authoidview,\n authremoteuserview=self.authremoteuserview,\n permissionmodelview=self.permissionmodelview,\n registeruser_view=self.registeruser_view,\n registeruserdbview=self.registeruserdbview,\n registeruseroauthview=self.registeruseroauthview,\n registerusermodelview=self.registerusermodelview,\n registeruseroidview=self.registeruseroidview,\n resetmypasswordview=self.resetmypasswordview,\n resetpasswordview=self.resetpasswordview,\n rolemodelview=self.rolemodelview,\n user_model=self.user_model,\n userinfoeditview=self.userinfoeditview,\n userdbmodelview=self.userdbmodelview,\n userldapmodelview=self.userldapmodelview,\n useroauthmodelview=self.useroauthmodelview,\n useroidmodelview=self.useroidmodelview,\n userremoteusermodelview=self.userremoteusermodelview,\n userstatschartview=self.userstatschartview,\n )\n\n # Go and fix up the SQLAInterface used from the stock one to our subclass.\n # This is needed to support the \"hack\" where we had to edit\n # FieldConverter.conversion_table in place in airflow.www.utils\n for attr in dir(self):\n if not attr.endswith(\"view\"):\n continue\n view = getattr(self, attr, 
None)\n if not view or not getattr(view, \"datamodel\", None):\n continue\n view.datamodel = CustomSQLAInterface(view.datamodel.obj)\n self.perms = None\n\n def _get_root_dag_id(self, dag_id: str) -> str:\n if \".\" in dag_id:\n dm = self.appbuilder.get_session.execute(\n select(DagModel.dag_id, DagModel.root_dag_id).where(DagModel.dag_id == dag_id)\n ).one()\n return dm.root_dag_id or dm.dag_id\n return dag_id\n\n def init_role(self, role_name, perms) -> None:\n \"\"\"\n Initialize the role with actions and related resources.\n\n :param role_name:\n :param perms:\n :return:\n \"\"\"\n warnings.warn(\n \"`init_role` has been deprecated. Please use `bulk_sync_roles` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n self.bulk_sync_roles([{\"role\": role_name, \"perms\": perms}])\n\n def bulk_sync_roles(self, roles: Iterable[dict[str, Any]]) -> None:\n \"\"\"Sync the provided roles and permissions.\"\"\"\n existing_roles = self._get_all_roles_with_permissions()\n non_dag_perms = self._get_all_non_dag_permissions()\n\n for config in roles:\n role_name = config[\"role\"]\n perms = config[\"perms\"]\n role = existing_roles.get(role_name) or self.add_role(role_name)\n\n for action_name, resource_name in perms:\n perm = non_dag_perms.get((action_name, resource_name)) or self.create_permission(\n action_name, resource_name\n )\n\n if perm not in role.permissions:\n self.add_permission_to_role(role, perm)\n\n @staticmethod\n def get_user_roles(user=None):\n \"\"\"\n Get all the roles associated with the user.\n\n :param user: the ab_user in FAB model.\n :return: a list of roles associated with the user.\n \"\"\"\n if user is None:\n user = g.user\n return user.roles\n\n def get_readable_dags(self, user) -> Iterable[DagModel]:\n \"\"\"Gets the DAGs readable by authenticated user.\"\"\"\n warnings.warn(\n \"`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)\n\n def get_editable_dags(self, user) -> Iterable[DagModel]:\n \"\"\"Gets the DAGs editable by authenticated user.\"\"\"\n warnings.warn(\n \"`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RemovedInAirflow3Warning)\n return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)\n\n @provide_session\n def get_accessible_dags(\n self,\n user_actions: Container[str] | None,\n user,\n session: Session = NEW_SESSION,\n ) -> Iterable[DagModel]:\n warnings.warn(\n \"`get_accessible_dags` has been deprecated. 
Please use `get_accessible_dag_ids` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=3,\n )\n dag_ids = self.get_accessible_dag_ids(user, user_actions, session)\n return session.scalars(select(DagModel).where(DagModel.dag_id.in_(dag_ids)))\n\n def get_readable_dag_ids(self, user) -> set[str]:\n \"\"\"Gets the DAG IDs readable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])\n\n def get_editable_dag_ids(self, user) -> set[str]:\n \"\"\"Gets the DAG IDs editable by authenticated user.\"\"\"\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])\n\n @provide_session\n def get_accessible_dag_ids(\n self,\n user,\n user_actions: Container[str] | None = None,\n session: Session = NEW_SESSION,\n ) -> set[str]:\n \"\"\"Generic function to get readable or writable DAGs for user.\"\"\"\n if not user_actions:\n user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]\n\n if not get_auth_manager().is_logged_in():\n roles = user.roles\n else:\n if (permissions.ACTION_CAN_EDIT in user_actions and self.can_edit_all_dags(user)) or (\n permissions.ACTION_CAN_READ in user_actions and self.can_read_all_dags(user)\n ):\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n user_query = session.scalar(\n select(User)\n .options(\n joinedload(User.roles)\n .subqueryload(Role.permissions)\n .options(joinedload(Permission.action), joinedload(Permission.resource))\n )\n .where(User.id == user.id)\n )\n roles = user_query.roles\n\n resources = set()\n for role in roles:\n for permission in role.permissions:\n action = permission.action.name\n if action not in user_actions:\n continue\n\n resource = permission.resource.name\n if resource == permissions.RESOURCE_DAG:\n return {dag.dag_id for dag in session.execute(select(DagModel.dag_id))}\n\n if resource.startswith(permissions.RESOURCE_DAG_PREFIX):\n resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])\n else:\n resources.add(resource)\n return {\n dag.dag_id\n for dag in session.execute(select(DagModel.dag_id).where(DagModel.dag_id.in_(resources)))\n }\n\n def can_access_some_dags(self, action: str, dag_id: str | None = None) -> bool:\n \"\"\"Checks if user has read or write access to some dags.\"\"\"\n if dag_id and dag_id != \"~\":\n root_dag_id = self._get_root_dag_id(dag_id)\n return self.has_access(action, permissions.resource_name_for_dag(root_dag_id))\n\n user = g.user\n if action == permissions.ACTION_CAN_READ:\n return any(self.get_readable_dag_ids(user))\n return any(self.get_editable_dag_ids(user))\n\n def can_read_dag(self, dag_id: str, user=None) -> bool:\n \"\"\"Determines whether a user has DAG read access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)\n\n def can_edit_dag(self, dag_id: str, user=None) -> bool:\n \"\"\"Determines whether a user has DAG edit access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_EDIT, dag_resource_name, user=user)\n\n def can_delete_dag(self, dag_id: str, user=None) -> bool:\n \"\"\"Determines whether a user has DAG delete access.\"\"\"\n root_dag_id = self._get_root_dag_id(dag_id)\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n return self.has_access(permissions.ACTION_CAN_DELETE, 
dag_resource_name, user=user)\n\n def prefixed_dag_id(self, dag_id: str) -> str:\n \"\"\"Returns the permission name for a DAG id.\"\"\"\n warnings.warn(\n \"`prefixed_dag_id` has been deprecated. \"\n \"Please use `airflow.security.permissions.resource_name_for_dag` instead.\",\n RemovedInAirflow3Warning,\n stacklevel=2,\n )\n root_dag_id = self._get_root_dag_id(dag_id)\n return permissions.resource_name_for_dag(root_dag_id)\n\n def is_dag_resource(self, resource_name: str) -> bool:\n \"\"\"Determines if a resource belongs to a DAG or all DAGs.\"\"\"\n if resource_name == permissions.RESOURCE_DAG:\n return True\n return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)\n\n def has_access(self, action_name: str, resource_name: str, user=None) -> bool:\n \"\"\"\n Verify whether a given user could perform a certain action on the given resource.\n\n Example actions might include can_read, can_write, can_delete, etc.\n\n :param action_name: action_name on resource (e.g can_read, can_edit).\n :param resource_name: name of view-menu or resource.\n :param user: user name\n :return: Whether user could perform certain action on the resource.\n :rtype bool\n \"\"\"\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n\n return False\n\n def _has_role(self, role_name_or_list: Container, user) -> bool:\n \"\"\"Whether the user has this role name.\"\"\"\n if not isinstance(role_name_or_list, list):\n role_name_or_list = [role_name_or_list]\n return any(r.name in role_name_or_list for r in user.roles)\n\n def has_all_dags_access(self, user) -> bool:\n \"\"\"\n Has all the dag access in any of the 3 cases.\n\n 1. Role needs to be in (Admin, Viewer, User, Op).\n 2. Has can_read action on dags resource.\n 3. Has can_edit action on dags resource.\n \"\"\"\n if not user:\n user = g.user\n return (\n self._has_role([\"Admin\", \"Viewer\", \"Op\", \"User\"], user)\n or self.can_read_all_dags(user)\n or self.can_edit_all_dags(user)\n )\n\n def can_edit_all_dags(self, user=None) -> bool:\n \"\"\"Has can_edit action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)\n\n def can_read_all_dags(self, user=None) -> bool:\n \"\"\"Has can_read action on DAG resource.\"\"\"\n return self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)\n\n def clean_perms(self) -> None:\n \"\"\"FAB leaves faulty permissions that need to be cleaned up.\"\"\"\n self.log.debug(\"Cleaning faulty perms\")\n sesh = self.appbuilder.get_session\n perms = sesh.query(Permission).filter(\n or_(\n Permission.action == None, # noqa\n Permission.resource == None, # noqa\n )\n )\n # Since FAB doesn't define ON DELETE CASCADE on these tables, we need\n # to delete the _object_ so that SQLA knows to delete the many-to-many\n # relationship object too. 
:(\n\n deleted_count = 0\n for perm in perms:\n sesh.delete(perm)\n deleted_count += 1\n sesh.commit()\n if deleted_count:\n self.log.info(\"Deleted %s faulty permissions\", deleted_count)\n\n def _merge_perm(self, action_name: str, resource_name: str) -> None:\n \"\"\"\n Add the new (action, resource) to assoc_permission_role if it doesn't exist.\n\n It will add the related entry to ab_permission and ab_resource two meta tables as well.\n\n :param action_name: Name of the action\n :param resource_name: Name of the resource\n :return:\n \"\"\"\n action = self.get_action(action_name)\n resource = self.get_resource(resource_name)\n perm = None\n if action and resource:\n perm = self.appbuilder.get_session.scalar(\n select(self.permission_model).filter_by(action=action, resource=resource).limit(1)\n )\n if not perm and action_name and resource_name:\n self.create_permission(action_name, resource_name)\n\n def add_homepage_access_to_custom_roles(self) -> None:\n \"\"\"\n Add Website.can_read access to all custom roles.\n\n :return: None.\n \"\"\"\n website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)\n custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES]\n for role in custom_roles:\n self.add_permission_to_role(role, website_permission)\n\n self.appbuilder.get_session.commit()\n\n def get_all_permissions(self) -> set[tuple[str, str]]:\n \"\"\"Returns all permissions as a set of tuples with the action and resource names.\"\"\"\n return set(\n self.appbuilder.get_session.execute(\n select(self.action_model.name, self.resource_model.name)\n .join(self.permission_model.action)\n .join(self.permission_model.resource)\n )\n )\n\n def _get_all_non_dag_permissions(self) -> dict[tuple[str, str], Permission]:\n \"\"\"\n Get permissions except those that are for specific DAGs.\n\n Returns a dict with a key of (action_name, resource_name) and value of permission\n with all permissions except those that are for specific DAGs.\n \"\"\"\n return {\n (action_name, resource_name): viewmodel\n for action_name, resource_name, viewmodel in (\n self.appbuilder.get_session.execute(\n select(self.action_model.name, self.resource_model.name, self.permission_model)\n .join(self.permission_model.action)\n .join(self.permission_model.resource)\n .where(~self.resource_model.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n )\n }\n\n def _get_all_roles_with_permissions(self) -> dict[str, Role]:\n \"\"\"Returns a dict with a key of role name and value of role with early loaded permissions.\"\"\"\n return {\n r.name: r\n for r in self.appbuilder.get_session.scalars(\n select(self.role_model).options(joinedload(self.role_model.permissions))\n ).unique()\n }\n\n def create_dag_specific_permissions(self) -> None:\n \"\"\"\n Add permissions to all DAGs.\n\n Creates 'can_read', 'can_edit', and 'can_delete' permissions for all\n DAGs, along with any `access_control` permissions provided in them.\n\n This does iterate through ALL the DAGs, which can be slow. 
See `sync_perm_for_dag`\n if you only need to sync a single DAG.\n\n :return: None.\n \"\"\"\n perms = self.get_all_permissions()\n dagbag = DagBag(read_dags_from_db=True)\n dagbag.collect_dags_from_db()\n dags = dagbag.dags.values()\n\n for dag in dags:\n root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id\n dag_resource_name = permissions.resource_name_for_dag(root_dag_id)\n for action_name in self.DAG_ACTIONS:\n if (action_name, dag_resource_name) not in perms:\n self._merge_perm(action_name, dag_resource_name)\n\n if dag.access_control:\n self.sync_perm_for_dag(dag_resource_name, dag.access_control)\n\n def update_admin_permission(self) -> None:\n \"\"\"\n Add missing permissions to the table for admin.\n\n Admin should get all the permissions, except the dag permissions\n because Admin already has Dags permission.\n Add the missing ones to the table for admin.\n\n :return: None.\n \"\"\"\n session = self.appbuilder.get_session\n dag_resources = session.scalars(\n select(Resource).where(Resource.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n resource_ids = [resource.id for resource in dag_resources]\n\n perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n\n admin = self.find_role(\"Admin\")\n admin.permissions = list(set(admin.permissions) | set(perms))\n\n session.commit()\n\n def sync_roles(self) -> None:\n \"\"\"\n Initialize default and custom roles with related permissions.\n\n 1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n 2. Init the custom role(dag-user) with related permissions.\n\n :return: None.\n \"\"\"\n # Create global all-dag permissions\n self.create_perm_vm_for_all_dag()\n\n # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions\n self.bulk_sync_roles(self.ROLE_CONFIGS)\n\n self.add_homepage_access_to_custom_roles()\n # init existing roles, the rest role could be created through UI.\n self.update_admin_permission()\n self.clean_perms()\n\n def sync_resource_permissions(self, perms: Iterable[tuple[str, str]] | None = None) -> None:\n \"\"\"Populates resource-based permissions.\"\"\"\n if not perms:\n return\n\n for action_name, resource_name in perms:\n self.create_resource(resource_name)\n self.create_permission(action_name, resource_name)\n\n def sync_perm_for_dag(\n self,\n dag_id: str,\n access_control: dict[str, Collection[str]] | None = None,\n ) -> None:\n \"\"\"\n Sync permissions for given dag id.\n\n The dag id surely exists in our dag bag as only / refresh button or DagBag will call this function.\n\n :param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g.,\n {'can_read'}\n :return:\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n for dag_action_name in self.DAG_ACTIONS:\n self.create_permission(dag_action_name, dag_resource_name)\n\n if access_control is not None:\n self.log.info(\"Syncing DAG-level permissions for DAG '%s'\", dag_resource_name)\n self._sync_dag_view_permissions(dag_resource_name, access_control)\n else:\n self.log.info(\n \"Not syncing DAG-level permissions for DAG '%s' as access control is unset.\",\n dag_resource_name,\n )\n\n def _sync_dag_view_permissions(self, dag_id: str, access_control: dict[str, Collection[str]]) -> None:\n \"\"\"\n Set the access policy on the given DAG's ViewModel.\n\n 
:param dag_id: the ID of the DAG whose permissions should be updated\n :param access_control: a dict where each key is a rolename and\n each value is a set() of action names (e.g. {'can_read'})\n \"\"\"\n dag_resource_name = permissions.resource_name_for_dag(dag_id)\n\n def _get_or_create_dag_permission(action_name: str) -> Permission | None:\n perm = self.get_permission(action_name, dag_resource_name)\n if not perm:\n self.log.info(\"Creating new action '%s' on resource '%s'\", action_name, dag_resource_name)\n perm = self.create_permission(action_name, dag_resource_name)\n\n return perm\n\n def _revoke_stale_permissions(resource: Resource):\n existing_dag_perms = self.get_resource_permissions(resource)\n for perm in existing_dag_perms:\n non_admin_roles = [role for role in perm.role if role.name != \"Admin\"]\n for role in non_admin_roles:\n target_perms_for_role = access_control.get(role.name, ())\n if perm.action.name not in target_perms_for_role:\n self.log.info(\n \"Revoking '%s' on DAG '%s' for role '%s'\",\n perm.action,\n dag_resource_name,\n role.name,\n )\n self.remove_permission_from_role(role, perm)\n\n resource = self.get_resource(dag_resource_name)\n if resource:\n _revoke_stale_permissions(resource)\n\n for rolename, action_names in access_control.items():\n role = self.find_role(rolename)\n if not role:\n raise AirflowException(\n f\"The access_control mapping for DAG '{dag_id}' includes a role named \"\n f\"'{rolename}', but that role does not exist\"\n )\n\n action_names = set(action_names)\n invalid_action_names = action_names - self.DAG_ACTIONS\n if invalid_action_names:\n raise AirflowException(\n f\"The access_control map for DAG '{dag_resource_name}' includes \"\n f\"the following invalid permissions: {invalid_action_names}; \"\n f\"The set of valid permissions is: {self.DAG_ACTIONS}\"\n )\n\n for action_name in action_names:\n dag_perm = _get_or_create_dag_permission(action_name)\n if dag_perm:\n self.add_permission_to_role(role, dag_perm)\n\n def create_perm_vm_for_all_dag(self) -> None:\n \"\"\"Create perm-vm if not exist and insert into FAB security model for all-dags.\"\"\"\n # create perm for global logical dag\n for resource_name in self.DAG_RESOURCES:\n for action_name in self.DAG_ACTIONS:\n self._merge_perm(action_name, resource_name)\n\n def check_authorization(\n self,\n perms: Sequence[tuple[str, str]] | None = None,\n dag_id: str | None = None,\n ) -> bool:\n \"\"\"Checks that the logged in user has the specified permissions.\"\"\"\n if not perms:\n return True\n\n for perm in perms:\n if perm in (\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),\n ):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n\n elif not self.has_access(*perm):\n return False\n\n return True\n\n\nclass FakeAppBuilder:\n \"\"\"Stand-in class to replace a Flask App Builder.\n\n The only purpose is to provide the ``self.appbuilder.get_session`` interface\n for ``ApplessAirflowSecurityManager`` so it can be used without a real Flask\n app, which is slow to create.\n \"\"\"\n\n def __init__(self, session: Session | None = None) -> None:\n self.get_session = session\n\n\nclass ApplessAirflowSecurityManager(AirflowSecurityManager):\n \"\"\"Security Manager that doesn't need the whole flask app.\"\"\"\n\n def __init__(self, 
session: Session | None = None):\n self.appbuilder = FakeAppBuilder(session)\n",
"step-ids": [
33,
36,
40,
47,
49
]
}
|
[
33,
36,
40,
47,
49
] |
# Software Name: MOON
# Version: 5.4
# SPDX-FileCopyrightText: Copyright (c) 2018-2020 Orange and its contributors
# SPDX-License-Identifier: Apache-2.0
# This software is distributed under the 'Apache License 2.0',
# the text of which is available at 'http://www.apache.org/licenses/LICENSE-2.0.txt'
# or see the "LICENSE" file for more details.
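# Thin wrappers delegating PDP CRUD operations to moon_manager's PDPManager;
# the import is kept inside each function so the DB driver loads only on use.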
def update_pdp(pdp_id, value):
from moon_manager.db_driver import PDPManager
return PDPManager.update_pdp("", pdp_id, value)
def delete_pdp(pdp_id):
from moon_manager.db_driver import PDPManager
PDPManager.delete_pdp("", pdp_id)
def add_pdp(pdp_id=None, value=None):
from moon_manager.db_driver import PDPManager
return PDPManager.add_pdp("", pdp_id, value)
def get_pdp(pdp_id=None):
from moon_manager.db_driver import PDPManager
return PDPManager.get_pdp("", pdp_id)
|
normal
|
{
"blob_id": "af35075eaca9bba3d6bdb73353eaf944869cdede",
"index": 799,
"step-1": "<mask token>\n\n\ndef delete_pdp(pdp_id):\n from moon_manager.db_driver import PDPManager\n PDPManager.delete_pdp('', pdp_id)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef delete_pdp(pdp_id):\n from moon_manager.db_driver import PDPManager\n PDPManager.delete_pdp('', pdp_id)\n\n\ndef add_pdp(pdp_id=None, value=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.add_pdp('', pdp_id, value)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef delete_pdp(pdp_id):\n from moon_manager.db_driver import PDPManager\n PDPManager.delete_pdp('', pdp_id)\n\n\ndef add_pdp(pdp_id=None, value=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.add_pdp('', pdp_id, value)\n\n\ndef get_pdp(pdp_id=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.get_pdp('', pdp_id)\n",
"step-4": "def update_pdp(pdp_id, value):\n from moon_manager.db_driver import PDPManager\n return PDPManager.update_pdp('', pdp_id, value)\n\n\ndef delete_pdp(pdp_id):\n from moon_manager.db_driver import PDPManager\n PDPManager.delete_pdp('', pdp_id)\n\n\ndef add_pdp(pdp_id=None, value=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.add_pdp('', pdp_id, value)\n\n\ndef get_pdp(pdp_id=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.get_pdp('', pdp_id)\n",
"step-5": "# Software Name: MOON\n\n# Version: 5.4\n\n# SPDX-FileCopyrightText: Copyright (c) 2018-2020 Orange and its contributors\n# SPDX-License-Identifier: Apache-2.0\n\n# This software is distributed under the 'Apache License 2.0',\n# the text of which is available at 'http://www.apache.org/licenses/LICENSE-2.0.txt'\n# or see the \"LICENSE\" file for more details.\n\n\n\ndef update_pdp(pdp_id, value):\n from moon_manager.db_driver import PDPManager\n return PDPManager.update_pdp(\"\", pdp_id, value)\n\n\ndef delete_pdp(pdp_id):\n from moon_manager.db_driver import PDPManager\n PDPManager.delete_pdp(\"\", pdp_id)\n\n\ndef add_pdp(pdp_id=None, value=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.add_pdp(\"\", pdp_id, value)\n\n\ndef get_pdp(pdp_id=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.get_pdp(\"\", pdp_id)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@cache_control(public=True, max_age=ONE_MONTH)
def index(request):
return render(request, 'ajaxornot/index.html')
<|reserved_special_token_0|>
@cache_control(public=True, max_age=ONE_MONTH)
def view1(request):
context = {'items': get_data()}
return render(request, 'ajaxornot/view1.html', context)
<|reserved_special_token_0|>
@cache_control(public=True, max_age=ONE_MONTH)
def view2_table(request):
context = {'items': get_data()}
return render(request, 'ajaxornot/view2_table.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view3(request):
return render(request, 'ajaxornot/view3.html')
<|reserved_special_token_0|>
@cache_control(public=True, max_age=ONE_MONTH)
def view4(request):
data = get_data(pub_date_format=lambda x: x.strftime('%B %Y'))
context = {'items': data}
return render(request, 'ajaxornot/view4.html', context)
<|reserved_special_token_0|>
@cache_control(public=True, max_age=ONE_MONTH)
def view6(request):
return render(request, 'ajaxornot/view6.html')
<|reserved_special_token_0|>
@cache_control(public=True, max_age=ONE_MONTH)
def view7b(request):
return render(request, 'ajaxornot/view7b.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@cache_control(public=True, max_age=ONE_MONTH)
def index(request):
return render(request, 'ajaxornot/index.html')
<|reserved_special_token_0|>
@cache_control(public=True, max_age=ONE_MONTH)
def view1(request):
context = {'items': get_data()}
return render(request, 'ajaxornot/view1.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view2(request):
return render(request, 'ajaxornot/view2.html')
@cache_control(public=True, max_age=ONE_MONTH)
def view2_table(request):
context = {'items': get_data()}
return render(request, 'ajaxornot/view2_table.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view3(request):
return render(request, 'ajaxornot/view3.html')
<|reserved_special_token_0|>
@cache_control(public=True, max_age=ONE_MONTH)
def view4(request):
data = get_data(pub_date_format=lambda x: x.strftime('%B %Y'))
context = {'items': data}
return render(request, 'ajaxornot/view4.html', context)
<|reserved_special_token_0|>
@cache_control(public=True, max_age=ONE_MONTH)
def view6(request):
return render(request, 'ajaxornot/view6.html')
@cache_control(public=True, max_age=ONE_MONTH)
@json_view
def view6_data(request):
return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}
<|reserved_special_token_0|>
@cache_control(public=True, max_age=ONE_MONTH)
def view7b(request):
return render(request, 'ajaxornot/view7b.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@cache_control(public=True, max_age=ONE_MONTH)
def index(request):
return render(request, 'ajaxornot/index.html')
def get_data(max_length=1000, pub_date_format=None, offset=0):
items = []
category_names = dict((x.id, x.name) for x in Category.objects.all())
categories = defaultdict(list)
for e in BlogItem.categories.through.objects.all():
categories[e.blogitem_id].append(category_names[e.category_id])
qs = BlogItem.objects.filter(pub_date__lt=utc_now()).order_by('-pub_date')
for item in qs[offset:max_length]:
pub_date = item.pub_date
if pub_date_format:
pub_date = pub_date_format(pub_date)
items.append({'title': item.title, 'slug': item.oid, 'pub_date':
pub_date, 'keywords': [x for x in item.proper_keywords if x][:3
], 'categories': categories[item.id][:3]})
return items
@cache_control(public=True, max_age=ONE_MONTH)
def view1(request):
context = {'items': get_data()}
return render(request, 'ajaxornot/view1.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view2(request):
return render(request, 'ajaxornot/view2.html')
@cache_control(public=True, max_age=ONE_MONTH)
def view2_table(request):
context = {'items': get_data()}
return render(request, 'ajaxornot/view2_table.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view3(request):
return render(request, 'ajaxornot/view3.html')
@cache_control(public=True, max_age=ONE_MONTH)
@json_view
def view3_data(request):
return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}
@cache_control(public=True, max_age=ONE_MONTH)
def view4(request):
data = get_data(pub_date_format=lambda x: x.strftime('%B %Y'))
context = {'items': data}
return render(request, 'ajaxornot/view4.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view5(request):
context = {'items': get_data(max_length=25)}
return render(request, 'ajaxornot/view5.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view5_table(request):
context = {'items': get_data(offset=25)}
return render(request, 'ajaxornot/view5_trs.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view6(request):
return render(request, 'ajaxornot/view6.html')
@cache_control(public=True, max_age=ONE_MONTH)
@json_view
def view6_data(request):
return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}
@cache_control(public=True, max_age=ONE_MONTH)
def view7a(request):
return render(request, 'ajaxornot/view7a.html')
@cache_control(public=True, max_age=ONE_MONTH)
def view7b(request):
return render(request, 'ajaxornot/view7b.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ONE_MONTH = 60 * 60 * 24 * 30
@cache_control(public=True, max_age=ONE_MONTH)
def index(request):
return render(request, 'ajaxornot/index.html')
def get_data(max_length=1000, pub_date_format=None, offset=0):
items = []
category_names = dict((x.id, x.name) for x in Category.objects.all())
categories = defaultdict(list)
for e in BlogItem.categories.through.objects.all():
categories[e.blogitem_id].append(category_names[e.category_id])
qs = BlogItem.objects.filter(pub_date__lt=utc_now()).order_by('-pub_date')
for item in qs[offset:max_length]:
pub_date = item.pub_date
if pub_date_format:
pub_date = pub_date_format(pub_date)
items.append({'title': item.title, 'slug': item.oid, 'pub_date':
pub_date, 'keywords': [x for x in item.proper_keywords if x][:3
], 'categories': categories[item.id][:3]})
return items
@cache_control(public=True, max_age=ONE_MONTH)
def view1(request):
context = {'items': get_data()}
return render(request, 'ajaxornot/view1.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view2(request):
return render(request, 'ajaxornot/view2.html')
@cache_control(public=True, max_age=ONE_MONTH)
def view2_table(request):
context = {'items': get_data()}
return render(request, 'ajaxornot/view2_table.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view3(request):
return render(request, 'ajaxornot/view3.html')
@cache_control(public=True, max_age=ONE_MONTH)
@json_view
def view3_data(request):
return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}
@cache_control(public=True, max_age=ONE_MONTH)
def view4(request):
data = get_data(pub_date_format=lambda x: x.strftime('%B %Y'))
context = {'items': data}
return render(request, 'ajaxornot/view4.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view5(request):
context = {'items': get_data(max_length=25)}
return render(request, 'ajaxornot/view5.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view5_table(request):
context = {'items': get_data(offset=25)}
return render(request, 'ajaxornot/view5_trs.html', context)
@cache_control(public=True, max_age=ONE_MONTH)
def view6(request):
return render(request, 'ajaxornot/view6.html')
@cache_control(public=True, max_age=ONE_MONTH)
@json_view
def view6_data(request):
return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}
@cache_control(public=True, max_age=ONE_MONTH)
def view7a(request):
return render(request, 'ajaxornot/view7a.html')
@cache_control(public=True, max_age=ONE_MONTH)
def view7b(request):
return render(request, 'ajaxornot/view7b.html')
<|reserved_special_token_1|>
from collections import defaultdict
from django.shortcuts import render
from django.views.decorators.cache import cache_control
from peterbecom.plog.models import BlogItem, Category
from peterbecom.plog.utils import utc_now
from peterbecom.plog.views import json_view
ONE_MONTH = 60 * 60 * 24 * 30
@cache_control(public=True, max_age=ONE_MONTH)
def index(request):
return render(request, "ajaxornot/index.html")
def get_data(max_length=1000, pub_date_format=None, offset=0):
items = []
category_names = dict((x.id, x.name) for x in Category.objects.all())
categories = defaultdict(list)
for e in BlogItem.categories.through.objects.all():
categories[e.blogitem_id].append(category_names[e.category_id])
qs = BlogItem.objects.filter(pub_date__lt=utc_now()).order_by("-pub_date")
for item in qs[offset:max_length]:
pub_date = item.pub_date
if pub_date_format:
pub_date = pub_date_format(pub_date)
items.append(
{
"title": item.title,
"slug": item.oid,
"pub_date": pub_date,
"keywords": [x for x in item.proper_keywords if x][:3],
"categories": categories[item.id][:3],
}
)
return items
@cache_control(public=True, max_age=ONE_MONTH)
def view1(request):
context = {"items": get_data()}
return render(request, "ajaxornot/view1.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view2(request):
return render(request, "ajaxornot/view2.html")
@cache_control(public=True, max_age=ONE_MONTH)
def view2_table(request):
context = {"items": get_data()}
return render(request, "ajaxornot/view2_table.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view3(request):
return render(request, "ajaxornot/view3.html")
@cache_control(public=True, max_age=ONE_MONTH)
@json_view
def view3_data(request):
return {"items": get_data(pub_date_format=lambda x: x.strftime("%B %Y"))}
@cache_control(public=True, max_age=ONE_MONTH)
def view4(request):
data = get_data(pub_date_format=lambda x: x.strftime("%B %Y"))
context = {"items": data}
return render(request, "ajaxornot/view4.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view5(request):
context = {"items": get_data(max_length=25)}
return render(request, "ajaxornot/view5.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view5_table(request):
context = {"items": get_data(offset=25)}
return render(request, "ajaxornot/view5_trs.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view6(request):
return render(request, "ajaxornot/view6.html")
@cache_control(public=True, max_age=ONE_MONTH)
@json_view
def view6_data(request):
return {"items": get_data(pub_date_format=lambda x: x.strftime("%B %Y"))}
@cache_control(public=True, max_age=ONE_MONTH)
def view7a(request):
return render(request, "ajaxornot/view7a.html")
@cache_control(public=True, max_age=ONE_MONTH)
def view7b(request):
return render(request, "ajaxornot/view7b.html")
|
flexible
|
{
"blob_id": "e90fb3b6009dd4fb780649c04398b361fa1ae195",
"index": 8489,
"step-1": "<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef index(request):\n return render(request, 'ajaxornot/index.html')\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view1(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view1.html', context)\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2_table(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view2_table.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view3(request):\n return render(request, 'ajaxornot/view3.html')\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view4(request):\n data = get_data(pub_date_format=lambda x: x.strftime('%B %Y'))\n context = {'items': data}\n return render(request, 'ajaxornot/view4.html', context)\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view6(request):\n return render(request, 'ajaxornot/view6.html')\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7b(request):\n return render(request, 'ajaxornot/view7b.html')\n",
"step-2": "<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef index(request):\n return render(request, 'ajaxornot/index.html')\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view1(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view1.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2(request):\n return render(request, 'ajaxornot/view2.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2_table(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view2_table.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view3(request):\n return render(request, 'ajaxornot/view3.html')\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view4(request):\n data = get_data(pub_date_format=lambda x: x.strftime('%B %Y'))\n context = {'items': data}\n return render(request, 'ajaxornot/view4.html', context)\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view6(request):\n return render(request, 'ajaxornot/view6.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view6_data(request):\n return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}\n\n\n<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7b(request):\n return render(request, 'ajaxornot/view7b.html')\n",
"step-3": "<mask token>\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef index(request):\n return render(request, 'ajaxornot/index.html')\n\n\ndef get_data(max_length=1000, pub_date_format=None, offset=0):\n items = []\n category_names = dict((x.id, x.name) for x in Category.objects.all())\n categories = defaultdict(list)\n for e in BlogItem.categories.through.objects.all():\n categories[e.blogitem_id].append(category_names[e.category_id])\n qs = BlogItem.objects.filter(pub_date__lt=utc_now()).order_by('-pub_date')\n for item in qs[offset:max_length]:\n pub_date = item.pub_date\n if pub_date_format:\n pub_date = pub_date_format(pub_date)\n items.append({'title': item.title, 'slug': item.oid, 'pub_date':\n pub_date, 'keywords': [x for x in item.proper_keywords if x][:3\n ], 'categories': categories[item.id][:3]})\n return items\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view1(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view1.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2(request):\n return render(request, 'ajaxornot/view2.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2_table(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view2_table.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view3(request):\n return render(request, 'ajaxornot/view3.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view3_data(request):\n return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view4(request):\n data = get_data(pub_date_format=lambda x: x.strftime('%B %Y'))\n context = {'items': data}\n return render(request, 'ajaxornot/view4.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view5(request):\n context = {'items': get_data(max_length=25)}\n return render(request, 'ajaxornot/view5.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view5_table(request):\n context = {'items': get_data(offset=25)}\n return render(request, 'ajaxornot/view5_trs.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view6(request):\n return render(request, 'ajaxornot/view6.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view6_data(request):\n return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7a(request):\n return render(request, 'ajaxornot/view7a.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7b(request):\n return render(request, 'ajaxornot/view7b.html')\n",
"step-4": "<mask token>\nONE_MONTH = 60 * 60 * 24 * 30\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef index(request):\n return render(request, 'ajaxornot/index.html')\n\n\ndef get_data(max_length=1000, pub_date_format=None, offset=0):\n items = []\n category_names = dict((x.id, x.name) for x in Category.objects.all())\n categories = defaultdict(list)\n for e in BlogItem.categories.through.objects.all():\n categories[e.blogitem_id].append(category_names[e.category_id])\n qs = BlogItem.objects.filter(pub_date__lt=utc_now()).order_by('-pub_date')\n for item in qs[offset:max_length]:\n pub_date = item.pub_date\n if pub_date_format:\n pub_date = pub_date_format(pub_date)\n items.append({'title': item.title, 'slug': item.oid, 'pub_date':\n pub_date, 'keywords': [x for x in item.proper_keywords if x][:3\n ], 'categories': categories[item.id][:3]})\n return items\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view1(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view1.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2(request):\n return render(request, 'ajaxornot/view2.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2_table(request):\n context = {'items': get_data()}\n return render(request, 'ajaxornot/view2_table.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view3(request):\n return render(request, 'ajaxornot/view3.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view3_data(request):\n return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view4(request):\n data = get_data(pub_date_format=lambda x: x.strftime('%B %Y'))\n context = {'items': data}\n return render(request, 'ajaxornot/view4.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view5(request):\n context = {'items': get_data(max_length=25)}\n return render(request, 'ajaxornot/view5.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view5_table(request):\n context = {'items': get_data(offset=25)}\n return render(request, 'ajaxornot/view5_trs.html', context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view6(request):\n return render(request, 'ajaxornot/view6.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view6_data(request):\n return {'items': get_data(pub_date_format=lambda x: x.strftime('%B %Y'))}\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7a(request):\n return render(request, 'ajaxornot/view7a.html')\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7b(request):\n return render(request, 'ajaxornot/view7b.html')\n",
"step-5": "from collections import defaultdict\n\nfrom django.shortcuts import render\nfrom django.views.decorators.cache import cache_control\n\nfrom peterbecom.plog.models import BlogItem, Category\nfrom peterbecom.plog.utils import utc_now\nfrom peterbecom.plog.views import json_view\n\nONE_MONTH = 60 * 60 * 24 * 30\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef index(request):\n return render(request, \"ajaxornot/index.html\")\n\n\ndef get_data(max_length=1000, pub_date_format=None, offset=0):\n items = []\n category_names = dict((x.id, x.name) for x in Category.objects.all())\n categories = defaultdict(list)\n for e in BlogItem.categories.through.objects.all():\n categories[e.blogitem_id].append(category_names[e.category_id])\n qs = BlogItem.objects.filter(pub_date__lt=utc_now()).order_by(\"-pub_date\")\n for item in qs[offset:max_length]:\n pub_date = item.pub_date\n if pub_date_format:\n pub_date = pub_date_format(pub_date)\n items.append(\n {\n \"title\": item.title,\n \"slug\": item.oid,\n \"pub_date\": pub_date,\n \"keywords\": [x for x in item.proper_keywords if x][:3],\n \"categories\": categories[item.id][:3],\n }\n )\n return items\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view1(request):\n context = {\"items\": get_data()}\n return render(request, \"ajaxornot/view1.html\", context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2(request):\n return render(request, \"ajaxornot/view2.html\")\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view2_table(request):\n context = {\"items\": get_data()}\n return render(request, \"ajaxornot/view2_table.html\", context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view3(request):\n return render(request, \"ajaxornot/view3.html\")\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view3_data(request):\n return {\"items\": get_data(pub_date_format=lambda x: x.strftime(\"%B %Y\"))}\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view4(request):\n data = get_data(pub_date_format=lambda x: x.strftime(\"%B %Y\"))\n context = {\"items\": data}\n return render(request, \"ajaxornot/view4.html\", context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view5(request):\n context = {\"items\": get_data(max_length=25)}\n return render(request, \"ajaxornot/view5.html\", context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view5_table(request):\n context = {\"items\": get_data(offset=25)}\n return render(request, \"ajaxornot/view5_trs.html\", context)\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view6(request):\n return render(request, \"ajaxornot/view6.html\")\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\n@json_view\ndef view6_data(request):\n return {\"items\": get_data(pub_date_format=lambda x: x.strftime(\"%B %Y\"))}\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7a(request):\n return render(request, \"ajaxornot/view7a.html\")\n\n\n@cache_control(public=True, max_age=ONE_MONTH)\ndef view7b(request):\n return render(request, \"ajaxornot/view7b.html\")\n",
"step-ids": [
7,
9,
14,
15,
17
]
}
|
[
7,
9,
14,
15,
17
] |
"""Woma objects for dealing with HTTP.
Request and Response inherit from webob's Request and Response objects, so see
http://docs.webob.org/en/latest/ for full documentation. The only things
documented here are the customizations.
"""
from webob import Request as BaseRequest
from webob import Response as BaseResponse
class Client(object):
"""Make requests to a wsgi app and return the response."""
def __init__(self, app):
self.app = app
def request(self, path, method, body=None):
path = path or '/'
request = BaseRequest.blank(path)
request.method = method
request.text = body or ''
return request.get_response(self.app)
def get(self, path=None):
return self.request(path, 'GET')
def post(self, path=None, body=None):
return self.request(path, 'POST', body)
def put(self, path=None, body=None):
return self.request(path, 'PUT', body)
class Request(BaseRequest):
"""A webob.Request with additional properties."""
@property
def kwargs(self):
"""Returns 'router.kwargs' from environ if present, or {} otherwise."""
return self.environ.get('router.kwargs', {})
class Response(BaseResponse):
"""A webob.Response that can be initialized with defaults from request."""
@classmethod
def for_request(cls, request):
"""Initialize a Response with defaults based on the request.
>>> request = Request({})
>>> request.headers['Content-Type'] = 'text/html; charset=latin1'
>>> response = Response.for_request(request)
>>> response.content_type
'text/html'
>>> response.charset
'latin1'
"""
return cls(
status_code=200,
content_type=request.content_type or 'text/plain',
charset=request.charset or 'UTF-8')
def write(self, text):
"""An alias for `response.text = text`.
>>> response = Response()
>>> response.write('some text')
>>> response.text
'some text'
"""
self.text = text
|
normal
|
{
"blob_id": "ca11e9cf0bcfcbd714c45b5c95bd2c2044b65909",
"index": 384,
"step-1": "<mask token>\n\n\nclass Client(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n",
"step-2": "<mask token>\n\n\nclass Client(object):\n <mask token>\n\n def __init__(self, app):\n self.app = app\n <mask token>\n\n def get(self, path=None):\n return self.request(path, 'GET')\n <mask token>\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n",
"step-3": "<mask token>\n\n\nclass Client(object):\n <mask token>\n\n def __init__(self, app):\n self.app = app\n\n def request(self, path, method, body=None):\n path = path or '/'\n request = BaseRequest.blank(path)\n request.method = method\n request.text = body or ''\n return request.get_response(self.app)\n\n def get(self, path=None):\n return self.request(path, 'GET')\n <mask token>\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n",
"step-4": "<mask token>\n\n\nclass Client(object):\n \"\"\"Make requests to a wsgi app and return the response.\"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def request(self, path, method, body=None):\n path = path or '/'\n request = BaseRequest.blank(path)\n request.method = method\n request.text = body or ''\n return request.get_response(self.app)\n\n def get(self, path=None):\n return self.request(path, 'GET')\n\n def post(self, path=None, body=None):\n return self.request(path, 'POST', body)\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n",
"step-5": "\"\"\"Woma objects for dealing with HTTP.\n\nRequest and Response inherit from webob's Request and Response objects, so see\nhttp://docs.webob.org/en/latest/ for full documentation. The only things\ndocumented here are the customizations.\n\n\"\"\"\nfrom webob import Request as BaseRequest\nfrom webob import Response as BaseResponse\n\n\nclass Client(object):\n \"\"\"Make requests to a wsgi app and return the response.\"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def request(self, path, method, body=None):\n path = path or '/'\n request = BaseRequest.blank(path)\n request.method = method\n request.text = body or ''\n return request.get_response(self.app)\n\n def get(self, path=None):\n return self.request(path, 'GET')\n\n def post(self, path=None, body=None):\n return self.request(path, 'POST', body)\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(\n status_code=200,\n content_type=request.content_type or 'text/plain',\n charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n",
"step-ids": [
8,
11,
12,
14,
16
]
}
|
[
8,
11,
12,
14,
16
] |
operation = input('operation type: ').lower()
num1 = input("First number: ")
num2 = input("Second number: ")
try:
num1, num2 = float(num1), float(num2)
if operation == 'add':
result = num1 + num2
print(result)
elif operation == 'subtract':
result = num1 - num2
print(result)
elif operation == 'multiply':
result = num1 * num2
print(result)
elif operation == 'divide':
result = num1 / num2
print(result)
else:
        print('You did not choose the right operation')
except:
    # invalid numeric input or unknown operation
    print("Improper numbers or Operation")
|
normal
|
{
"blob_id": "bafb6c09ecd0017428441e109733ebcb189863ad",
"index": 3598,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n    num1, num2 = float(num1), float(num2)\n    if operation == 'add':\n        result = num1 + num2\n        print(result)\n    elif operation == 'subtract':\n        result = num1 - num2\n        print(result)\n    elif operation == 'multiply':\n        result = num1 * num2\n        print(result)\n    elif operation == 'divide':\n        result = num1 / num2\n        print(result)\n    else:\n        print('You did not choose the right operation')\nexcept:\n    print('Improper numbers or Operation')\n",
"step-3": "operation = input('operation type: ').lower()\nnum1 = input('First number: ')\nnum2 = input('Second number: ')\ntry:\n    num1, num2 = float(num1), float(num2)\n    if operation == 'add':\n        result = num1 + num2\n        print(result)\n    elif operation == 'subtract':\n        result = num1 - num2\n        print(result)\n    elif operation == 'multiply':\n        result = num1 * num2\n        print(result)\n    elif operation == 'divide':\n        result = num1 / num2\n        print(result)\n    else:\n        print('You did not choose the right operation')\nexcept:\n    print('Improper numbers or Operation')\n",
"step-4": "operation = input('operation type: ').lower()\nnum1 = input(\"First number: \")\nnum2 = input(\"Second number: \")\n\ntry:\n    num1, num2 = float(num1), float(num2)\n    if operation == 'add':\n        result = num1 + num2\n        print(result)\n    elif operation == 'subtract':\n        result = num1 - num2\n        print(result)\n    elif operation == 'multiply':\n        result = num1 * num2\n        print(result)\n    elif operation == 'divide':\n        result = num1 / num2\n        print(result)\n    else:\n        print('You did not choose the right operation')\n\nexcept:\n    # invalid numeric input or unknown operation\n    print(\"Improper numbers or Operation\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from collections import namedtuple
from functools import partial
import inspect
from itertools import product
import math
import os
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pytest
import scipy
from scipy.sparse import csr_matrix
import scipy.stats as osp
import jax
from jax import grad, lax, vmap
import jax.numpy as jnp
import jax.random as random
from jax.scipy.special import expit, logsumexp
from jax.scipy.stats import norm as jax_norm, truncnorm as jax_truncnorm
import numpyro.distributions as dist
from numpyro.distributions import (
SineBivariateVonMises,
constraints,
kl_divergence,
transforms,
)
from numpyro.distributions.batch_util import vmap_over
from numpyro.distributions.discrete import _to_probs_bernoulli, _to_probs_multinom
from numpyro.distributions.flows import InverseAutoregressiveTransform
from numpyro.distributions.gof import InvalidTest, auto_goodness_of_fit
from numpyro.distributions.transforms import (
LowerCholeskyAffine,
PermuteTransform,
PowerTransform,
SimplexToOrderedTransform,
SoftplusTransform,
biject_to,
)
from numpyro.distributions.util import (
matrix_to_tril_vec,
multinomial,
signed_stick_breaking_tril,
sum_rightmost,
vec_to_tril_matrix,
)
from numpyro.nn import AutoregressiveNN
TEST_FAILURE_RATE = 2e-5 # For all goodness-of-fit tests.
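# Batched Kronecker product: pairs the trailing two dims of A and B, then
# reshapes back to a single (m*p, n*q) trailing matrix per batch element.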
def my_kron(A, B):
D = A[..., :, None, :, None] * B[..., None, :, None, :]
ds = D.shape
newshape = (*ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1])
return D.reshape(newshape)
def _identity(x):
return x
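# Circular mean of angles: averages sines and cosines along axis 0, then
# recovers the mean angle with arctan2.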
def _circ_mean(angles):
return jnp.arctan2(
jnp.mean(jnp.sin(angles), axis=0), jnp.mean(jnp.cos(angles), axis=0)
)
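# Drift/diffusion-squared pairs consumed by the EulerMaruyama test cases;
# sde_fn2 is a two-dimensional FitzHugh-Nagumo-like system.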
def sde_fn1(x, _):
lam = 0.1
sigma2 = 0.1
return lam * x, sigma2
def sde_fn2(xy, _):
tau, a = 2.0, 1.1
x, y = xy[0], xy[1]
dx = tau * (x - x**3.0 / 3.0 + y)
dy = (1.0 / tau) * (a - x)
dxy = jnp.vstack([dx, dy]).reshape(xy.shape)
sigma2 = 0.1
return dxy, sigma2
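# Test-case tuple: automatically attaches the matching scipy distribution
# (when one is registered in _DIST_MAP below) to each (jax_dist, params) entry.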
class T(namedtuple("TestCase", ["jax_dist", "sp_dist", "params"])):
def __new__(cls, jax_dist, *params):
sp_dist = get_sp_dist(jax_dist)
return super(cls, T).__new__(cls, jax_dist, sp_dist, params)
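# Helpers that build frozen scipy equivalents of numpyro distributions.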
def _mvn_to_scipy(loc, cov, prec, tril):
jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_normal(mean=mean, cov=cov)
def _multivariate_t_to_scipy(df, loc, tril):
if scipy.__version__ < "1.6.0":
pytest.skip(
"Multivariate Student-T distribution is not available in scipy < 1.6"
)
jax_dist = dist.MultivariateStudentT(df, loc, tril)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_t(loc=mean, shape=cov, df=df)
def _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):
jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_normal(mean=mean, cov=cov)
def _truncnorm_to_scipy(loc, scale, low, high):
if low is None:
a = -np.inf
else:
a = (low - loc) / scale
if high is None:
b = np.inf
else:
b = (high - loc) / scale
return osp.truncnorm(a, b, loc=loc, scale=scale)
def _TruncatedNormal(loc, scale, low, high):
return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)
def _TruncatedCauchy(loc, scale, low, high):
return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)
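# Blank out constraint metadata so the generic tests can treat the factory
# like a regular Distribution class.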
_TruncatedNormal.arg_constraints = {}
_TruncatedNormal.reparametrized_params = []
_TruncatedNormal.infer_shapes = lambda *args: (lax.broadcast_shapes(*args), ())
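# SineSkewed variants with fixed uniform/von Mises base distributions.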
class SineSkewedUniform(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
lower, upper = (np.array([-math.pi, -math.pi]), np.array([math.pi, math.pi]))
base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)
class SineSkewedVonMises(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = (np.array([0.0]), np.array([1.0]))
base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_von_mises(self: SineSkewedVonMises, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)
class SineSkewedVonMisesBatched(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = (np.array([0.0, -1.234]), np.array([1.0, 10.0]))
base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_von_mises_batched(
self: SineSkewedVonMisesBatched, skewness=None
):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)
class _GaussianMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, scale):
component_dist = dist.Normal(loc=loc, scale=scale)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(
mixing_distribution=mixing_distribution,
component_distribution=component_dist,
)
@property
def loc(self):
return self.component_distribution.loc
@property
def scale(self):
return self.component_distribution.scale
@vmap_over.register
def _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):
component_distribution = vmap_over(
self.component_distribution, loc=loc, scale=scale
)
return vmap_over.dispatch(dist.MixtureSameFamily)(
self, _component_distribution=component_distribution
)
class _Gaussian2DMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, covariance_matrix):
component_dist = dist.MultivariateNormal(
loc=loc, covariance_matrix=covariance_matrix
)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(
mixing_distribution=mixing_distribution,
component_distribution=component_dist,
)
@property
def loc(self):
return self.component_distribution.loc
@property
def covariance_matrix(self):
return self.component_distribution.covariance_matrix
@vmap_over.register
def _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):
component_distribution = vmap_over(self.component_distribution, loc=loc)
return vmap_over.dispatch(dist.MixtureSameFamily)(
self, _component_distribution=component_distribution
)
class _GeneralMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, scales):
component_dists = [
dist.Normal(loc=loc_, scale=scale_) for loc_, scale_ in zip(locs, scales)
]
mixing_distribution = dist.Categorical(probs=mixing_probs)
return super().__init__(
mixing_distribution=mixing_distribution,
component_distributions=component_dists,
)
@property
def locs(self):
# hotfix for vmapping tests, which cannot easily check non-array attributes
return self.component_distributions[0].loc
@property
def scales(self):
return self.component_distributions[0].scale
@vmap_over.register
def _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):
component_distributions = [
vmap_over(d, loc=locs, scale=scales) for d in self.component_distributions
]
return vmap_over.dispatch(dist.MixtureGeneral)(
self, _component_distributions=component_distributions
)
class _General2DMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, covariance_matrices):
component_dists = [
dist.MultivariateNormal(loc=loc_, covariance_matrix=covariance_matrix)
for loc_, covariance_matrix in zip(locs, covariance_matrices)
]
mixing_distribution = dist.Categorical(probs=mixing_probs)
return super().__init__(
mixing_distribution=mixing_distribution,
component_distributions=component_dists,
)
@property
def locs(self):
# hotfix for vmapping tests, which cannot easily check non-array attributes
return self.component_distributions[0].loc
@property
def covariance_matrices(self):
return self.component_distributions[0].covariance_matrix
@vmap_over.register
def _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):
component_distributions = [
vmap_over(d, loc=locs) for d in self.component_distributions
]
return vmap_over.dispatch(dist.MixtureGeneral)(
self, _component_distributions=component_distributions
)
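# Gives ImproperUniform a sampler for testing: draw uniformly in the
# unconstrained space and map through the support's bijection.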
class _ImproperWrapper(dist.ImproperUniform):
def sample(self, key, sample_shape=()):
transform = biject_to(self.support)
prototype_value = jnp.zeros(self.event_shape)
unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))
shape = sample_shape + self.batch_shape + unconstrained_event_shape
unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)
return transform(unconstrained_samples)
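# ZeroInflatedLogits with a Poisson base; `rate` is a pytree data field so
# vmap_over can remap it alongside the gate logits.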
class ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):
arg_constraints = {"rate": constraints.positive, "gate_logits": constraints.real}
pytree_data_fields = ("rate",)
def __init__(self, rate, gate_logits, *, validate_args=None):
self.rate = rate
super().__init__(dist.Poisson(rate), gate_logits, validate_args=validate_args)
@vmap_over.register
def _vmap_over_zero_inflated_poisson_logits(
self: ZeroInflatedPoissonLogits, rate=None, gate_logits=None
):
dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(
self,
base_dist=vmap_over(self.base_dist, rate=rate),
gate_logits=gate_logits,
gate=gate_logits,
)
dist_axes.rate = rate
return dist_axes
class SparsePoisson(dist.Poisson):
def __init__(self, rate, *, validate_args=None):
super().__init__(rate, is_sparse=True, validate_args=validate_args)
class FoldedNormal(dist.FoldedDistribution):
arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
def __init__(self, loc, scale, validate_args=None):
self.loc = loc
self.scale = scale
super().__init__(dist.Normal(loc, scale), validate_args=validate_args)
@vmap_over.register
def _vmap_over_folded_normal(self: "FoldedNormal", loc=None, scale=None):
d = vmap_over.dispatch(dist.FoldedDistribution)(
self, base_dist=vmap_over(self.base_dist, loc=loc, scale=scale)
)
d.loc = loc
d.scale = scale
return d
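# CAR variant that forces the sparse adjacency-matrix code path.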
class _SparseCAR(dist.CAR):
reparametrized_params = ["loc", "correlation", "conditional_precision"]
def __init__(
self,
loc,
correlation,
conditional_precision,
adj_matrix,
*,
is_sparse=True,
validate_args=None,
):
super().__init__(
loc,
correlation,
conditional_precision,
adj_matrix,
is_sparse=True,
validate_args=validate_args,
)
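# Maps each numpyro distribution class to a factory producing the equivalent
# frozen scipy distribution for cross-checking samples and log-densities.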
_DIST_MAP = {
dist.AsymmetricLaplace: lambda loc, scale, asymmetry: osp.laplace_asymmetric(
asymmetry, loc=loc, scale=scale
),
dist.BernoulliProbs: lambda probs: osp.bernoulli(p=probs),
dist.BernoulliLogits: lambda logits: osp.bernoulli(p=_to_probs_bernoulli(logits)),
dist.Beta: lambda con1, con0: osp.beta(con1, con0),
dist.BetaProportion: lambda mu, kappa: osp.beta(mu * kappa, (1 - mu) * kappa),
dist.BinomialProbs: lambda probs, total_count: osp.binom(n=total_count, p=probs),
dist.BinomialLogits: lambda logits, total_count: osp.binom(
n=total_count, p=_to_probs_bernoulli(logits)
),
dist.Cauchy: lambda loc, scale: osp.cauchy(loc=loc, scale=scale),
dist.Chi2: lambda df: osp.chi2(df),
dist.Dirichlet: lambda conc: osp.dirichlet(conc),
dist.Exponential: lambda rate: osp.expon(scale=jnp.reciprocal(rate)),
dist.Gamma: lambda conc, rate: osp.gamma(conc, scale=1.0 / rate),
dist.GeometricProbs: lambda probs: osp.geom(p=probs, loc=-1),
dist.GeometricLogits: lambda logits: osp.geom(
p=_to_probs_bernoulli(logits), loc=-1
),
dist.Gumbel: lambda loc, scale: osp.gumbel_r(loc=loc, scale=scale),
dist.HalfCauchy: lambda scale: osp.halfcauchy(scale=scale),
dist.HalfNormal: lambda scale: osp.halfnorm(scale=scale),
dist.InverseGamma: lambda conc, rate: osp.invgamma(conc, scale=rate),
dist.Laplace: lambda loc, scale: osp.laplace(loc=loc, scale=scale),
dist.LogNormal: lambda loc, scale: osp.lognorm(s=scale, scale=jnp.exp(loc)),
dist.LogUniform: lambda a, b: osp.loguniform(a, b),
dist.MultinomialProbs: lambda probs, total_count: osp.multinomial(
n=total_count, p=probs
),
dist.MultinomialLogits: lambda logits, total_count: osp.multinomial(
n=total_count, p=_to_probs_multinom(logits)
),
dist.MultivariateNormal: _mvn_to_scipy,
dist.MultivariateStudentT: _multivariate_t_to_scipy,
dist.LowRankMultivariateNormal: _lowrank_mvn_to_scipy,
dist.Normal: lambda loc, scale: osp.norm(loc=loc, scale=scale),
dist.Pareto: lambda scale, alpha: osp.pareto(alpha, scale=scale),
dist.Poisson: lambda rate: osp.poisson(rate),
dist.StudentT: lambda df, loc, scale: osp.t(df=df, loc=loc, scale=scale),
dist.Uniform: lambda a, b: osp.uniform(a, b - a),
dist.Logistic: lambda loc, scale: osp.logistic(loc=loc, scale=scale),
dist.VonMises: lambda loc, conc: osp.vonmises(
loc=np.array(loc, dtype=np.float64), kappa=np.array(conc, dtype=np.float64)
),
dist.Weibull: lambda scale, conc: osp.weibull_min(
c=conc,
scale=scale,
),
_TruncatedNormal: _truncnorm_to_scipy,
}
def get_sp_dist(jax_dist):
classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]
for cls in classes:
if cls in _DIST_MAP:
return _DIST_MAP[cls]
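# Parameter grids for the generic tests: continuous distributions.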
CONTINUOUS = [
T(dist.AsymmetricLaplace, 1.0, 0.5, 1.0),
T(dist.AsymmetricLaplace, np.array([1.0, 2.0]), 2.0, 2.0),
T(dist.AsymmetricLaplace, np.array([[1.0], [2.0]]), 2.0, np.array([3.0, 5.0])),
T(dist.AsymmetricLaplaceQuantile, 0.0, 1.0, 0.5),
T(dist.AsymmetricLaplaceQuantile, np.array([1.0, 2.0]), 2.0, 0.7),
T(
dist.AsymmetricLaplaceQuantile,
np.array([[1.0], [2.0]]),
2.0,
np.array([0.2, 0.8]),
),
T(dist.Beta, 0.2, 1.1),
T(dist.Beta, 1.0, np.array([2.0, 2.0])),
T(dist.Beta, 1.0, np.array([[1.0, 1.0], [2.0, 2.0]])),
T(dist.BetaProportion, 0.2, 10.0),
T(dist.BetaProportion, 0.51, np.array([2.0, 1.0])),
T(dist.BetaProportion, 0.5, np.array([[4.0, 4.0], [2.0, 2.0]])),
T(dist.Chi2, 2.0),
T(dist.Chi2, np.array([0.3, 1.3])),
T(dist.Cauchy, 0.0, 1.0),
T(dist.Cauchy, 0.0, np.array([1.0, 2.0])),
T(dist.Cauchy, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),
T(dist.Dirichlet, np.array([1.7])),
T(dist.Dirichlet, np.array([0.2, 1.1])),
T(dist.Dirichlet, np.array([[0.2, 1.1], [2.0, 2.0]])),
T(
dist.EulerMaruyama,
np.array([0.0, 0.1, 0.2]),
sde_fn1,
dist.Normal(0.1, 1.0),
),
T(
dist.EulerMaruyama,
np.array([0.0, 0.1, 0.2]),
sde_fn2,
dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),
),
T(
dist.EulerMaruyama,
np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),
sde_fn2,
dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),
),
T(
dist.EulerMaruyama,
np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),
sde_fn2,
dist.Normal(jnp.array([[0.0, 1.0], [2.0, 3.0]]), 1e-2).to_event(1),
),
T(dist.Exponential, 2.0),
T(dist.Exponential, np.array([4.0, 2.0])),
T(dist.Gamma, np.array([1.7]), np.array([[2.0], [3.0]])),
T(dist.Gamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),
T(dist.GaussianRandomWalk, 0.1, 10),
T(dist.GaussianRandomWalk, np.array([0.1, 0.3, 0.25]), 10),
T(
dist.GaussianCopulaBeta,
np.array([7.0, 2.0]),
np.array([4.0, 10.0]),
np.array([[1.0, 0.75], [0.75, 1.0]]),
),
T(dist.GaussianCopulaBeta, 2.0, 1.5, np.eye(3)),
T(dist.GaussianCopulaBeta, 2.0, 1.5, np.full((5, 3, 3), np.eye(3))),
T(dist.Gompertz, np.array([1.7]), np.array([[2.0], [3.0]])),
T(dist.Gompertz, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),
T(dist.Gumbel, 0.0, 1.0),
T(dist.Gumbel, 0.5, 2.0),
T(dist.Gumbel, np.array([0.0, 0.5]), np.array([1.0, 2.0])),
T(FoldedNormal, 2.0, 4.0),
T(FoldedNormal, np.array([2.0, 50.0]), np.array([4.0, 100.0])),
T(dist.HalfCauchy, 1.0),
T(dist.HalfCauchy, np.array([1.0, 2.0])),
T(dist.HalfNormal, 1.0),
T(dist.HalfNormal, np.array([1.0, 2.0])),
T(_ImproperWrapper, constraints.positive, (), (3,)),
T(dist.InverseGamma, np.array([1.7]), np.array([[2.0], [3.0]])),
T(dist.InverseGamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),
T(dist.Kumaraswamy, 10.0, np.array([2.0, 3.0])),
T(dist.Kumaraswamy, np.array([1.7]), np.array([[2.0], [3.0]])),
T(dist.Kumaraswamy, 0.6, 0.5),
T(dist.Laplace, 0.0, 1.0),
T(dist.Laplace, 0.5, np.array([1.0, 2.5])),
T(dist.Laplace, np.array([1.0, -0.5]), np.array([2.3, 3.0])),
T(dist.LKJ, 2, 0.5, "onion"),
T(dist.LKJ, 5, np.array([0.5, 1.0, 2.0]), "cvine"),
T(dist.LKJCholesky, 2, 0.5, "onion"),
T(dist.LKJCholesky, 2, 0.5, "cvine"),
T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), "onion"),
pytest.param(
*T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), "cvine"),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
pytest.param(
*T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), "onion"),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), "cvine"),
T(dist.Logistic, 0.0, 1.0),
T(dist.Logistic, 1.0, np.array([1.0, 2.0])),
T(dist.Logistic, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),
T(dist.LogNormal, 1.0, 0.2),
T(dist.LogNormal, -1.0, np.array([0.5, 1.3])),
T(dist.LogNormal, np.array([0.5, -0.7]), np.array([[0.1, 0.4], [0.5, 0.1]])),
T(dist.LogUniform, 1.0, 2.0),
T(dist.LogUniform, 1.0, np.array([2.0, 3.0])),
T(dist.LogUniform, np.array([1.0, 2.0]), np.array([[3.0], [4.0]])),
T(
dist.MatrixNormal,
1.0 * np.arange(6).reshape(3, 2),
np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]),
np.array([[1.0, 0], [0.4, 1]]),
),
T(
dist.MatrixNormal,
1.0 * np.arange(12).reshape((2, 3, 2)),
np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]) * np.ones((2, 3, 3)),
np.array([[1.0, 0], [0.4, 0.5]]) * np.ones((2, 2, 2)),
),
T(
dist.MatrixNormal,
1.0 * np.arange(36).reshape((2, 3, 3, 2)),
np.identity(3),
np.identity(2),
),
T(dist.MultivariateNormal, 0.0, np.array([[1.0, 0.5], [0.5, 1.0]]), None, None),
T(
dist.MultivariateNormal,
np.array([1.0, 3.0]),
None,
np.array([[1.0, 0.5], [0.5, 1.0]]),
None,
),
T(
dist.MultivariateNormal,
np.array([1.0, 3.0]),
None,
np.array([[[1.0, 0.5], [0.5, 1.0]]]),
None,
),
T(
dist.MultivariateNormal,
np.array([2.0]),
None,
None,
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateNormal,
np.arange(6, dtype=np.float32).reshape((3, 2)),
None,
None,
np.array([[1.0, 0.0], [0.0, 1.0]]),
),
T(
dist.MultivariateNormal,
0.0,
None,
np.broadcast_to(np.identity(3), (2, 3, 3)),
None,
),
T(
dist.CAR,
1.2,
np.array([-0.2, 0.3]),
0.1,
np.array(
[
[0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 1.0, 0.0],
]
),
),
T(
dist.CAR,
np.array([0.0, 1.0, 3.0, 4.0]),
0.1,
np.array([0.3, 0.7]),
np.array(
[
[0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 1.0, 0.0],
]
),
),
T(
_SparseCAR,
np.array([[0.0, 1.0, 3.0, 4.0], [2.0, -1.0, -3.0, 2.0]]),
0.0,
0.1,
np.array(
[
[0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 1.0, 0.0],
]
),
),
T(
dist.MultivariateStudentT,
15.0,
0.0,
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.array([1.0, 3.0]),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.array([1.0, 3.0]),
np.array([[[1.0, 0.0], [0.5, 1.0]]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.array([3.0]),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.arange(6, dtype=np.float32).reshape((3, 2)),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.ones(3),
np.broadcast_to(np.identity(3), (2, 3, 3)),
),
T(
dist.MultivariateStudentT,
np.array(7.0),
np.array([1.0, 3.0]),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
np.arange(20, 22, dtype=jnp.float32),
np.ones(3),
np.broadcast_to(jnp.identity(3), (2, 3, 3)),
),
T(
dist.MultivariateStudentT,
np.arange(20, 26, dtype=jnp.float32).reshape((3, 2)),
np.ones(2),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.LowRankMultivariateNormal,
np.zeros(2),
np.array([[1.0], [0.0]]),
np.array([1.0, 1.0]),
),
T(
dist.LowRankMultivariateNormal,
np.arange(6, dtype=jnp.float32).reshape((2, 3)),
np.arange(6, dtype=jnp.float32).reshape((3, 2)),
np.array([1.0, 2.0, 3.0]),
),
T(dist.Normal, 0.0, 1.0),
T(dist.Normal, 1.0, np.array([1.0, 2.0])),
T(dist.Normal, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),
T(dist.Pareto, 1.0, 2.0),
T(dist.Pareto, np.array([1.0, 0.5]), np.array([0.3, 2.0])),
T(dist.Pareto, np.array([[1.0], [3.0]]), np.array([1.0, 0.5])),
T(dist.RelaxedBernoulliLogits, 2.0, -10.0),
T(dist.RelaxedBernoulliLogits, np.array([1.0, 3.0]), np.array([3.0, 8.0])),
T(dist.SoftLaplace, 1.0, 1.0),
T(dist.SoftLaplace, np.array([-1.0, 50.0]), np.array([4.0, 100.0])),
T(dist.StudentT, 1.0, 1.0, 0.5),
T(dist.StudentT, 2.0, np.array([1.0, 2.0]), 2.0),
T(dist.StudentT, np.array([3.0, 5.0]), np.array([[1.0], [2.0]]), 2.0),
T(_TruncatedCauchy, 0.0, 1.0, -1.0, None),
T(_TruncatedCauchy, 0.0, np.array([1.0, 2.0]), 1.0, None),
T(
_TruncatedCauchy,
np.array([0.0, 1.0]),
np.array([[1.0], [2.0]]),
np.array([-2.0, 2.0]),
None,
),
T(_TruncatedCauchy, 0.0, 1.0, None, 1.0),
T(_TruncatedCauchy, 0.0, 1.0, -1.0, 1.0),
T(_TruncatedNormal, 0.0, 1.0, -1.0, None),
T(_TruncatedNormal, -1.0, np.array([1.0, 2.0]), 1.0, None),
T(
_TruncatedNormal,
np.array([0.0, 1.0]),
np.array([[1.0], [2.0]]),
np.array([-2.0, 2.0]),
None,
),
T(_TruncatedNormal, -1.0, 2.0, 1.0, 5.0),
T(_TruncatedNormal, np.array([-1.0, 4.0]), 2.0, None, 5.0),
T(_TruncatedNormal, -1.0, np.array([2.0, 3.0]), 1.0, None),
T(_TruncatedNormal, -1.0, 2.0, np.array([-6.0, 4.0]), np.array([-4.0, 6.0])),
T(
_TruncatedNormal,
np.array([0.0, 1.0]),
np.array([[1.0], [2.0]]),
None,
np.array([-2.0, 2.0]),
),
T(dist.TwoSidedTruncatedDistribution, dist.Laplace(0.0, 1.0), -2.0, 3.0),
T(dist.Uniform, 0.0, 2.0),
T(dist.Uniform, 1.0, np.array([2.0, 3.0])),
T(dist.Uniform, np.array([0.0, 0.0]), np.array([[2.0], [3.0]])),
T(dist.Weibull, 0.2, 1.1),
T(dist.Weibull, 2.8, np.array([2.0, 2.0])),
T(dist.Weibull, 1.8, np.array([[1.0, 1.0], [2.0, 2.0]])),
T(
_GaussianMixture,
np.ones(3) / 3.0,
np.array([0.0, 7.7, 2.1]),
np.array([4.2, 7.7, 2.1]),
),
T(
_Gaussian2DMixture,
np.array([0.2, 0.5, 0.3]),
np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]), # Mean
np.array(
[
[
[0.1, -0.2],
[-0.2, 1.0],
],
[
[0.75, 0.0],
[0.0, 0.75],
],
[
[1.0, 0.5],
[0.5, 0.27],
],
]
), # Covariance
),
T(
_GeneralMixture,
np.array([0.2, 0.3, 0.5]),
np.array([0.0, 7.7, 2.1]),
np.array([4.2, 1.7, 2.1]),
),
T(
_General2DMixture,
np.array([0.2, 0.5, 0.3]),
np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]), # Mean
np.array(
[
[
[0.1, -0.2],
[-0.2, 1.0],
],
[
[0.75, 0.0],
[0.0, 0.75],
],
[
[1.0, 0.5],
[0.5, 0.27],
],
]
), # Covariance
),
]
DIRECTIONAL = [
T(dist.VonMises, 2.0, 10.0),
T(dist.VonMises, 2.0, np.array([150.0, 10.0])),
T(dist.VonMises, np.array([1 / 3 * np.pi, -1.0]), np.array([20.0, 30.0])),
pytest.param(
*T(
dist.SineBivariateVonMises,
0.0,
0.0,
5.0,
6.0,
2.0,
),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
T(
dist.SineBivariateVonMises,
3.003,
-1.343,
5.0,
6.0,
2.0,
),
pytest.param(
*T(
dist.SineBivariateVonMises,
-1.232,
-1.3430,
3.4,
2.0,
1.0,
),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
pytest.param(
*T(
dist.SineBivariateVonMises,
np.array([math.pi - 0.2, 1.0]),
np.array([0.0, 1.0]),
np.array([5.0, 5.0]),
np.array([7.0, 0.5]),
None,
np.array([0.5, 0.1]),
),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
T(dist.ProjectedNormal, np.array([0.0, 0.0])),
T(dist.ProjectedNormal, np.array([[2.0, 3.0]])),
T(dist.ProjectedNormal, np.array([0.0, 0.0, 0.0])),
T(dist.ProjectedNormal, np.array([[-1.0, 2.0, 3.0]])),
T(SineSkewedUniform, np.array([-math.pi / 4, 0.1])),
T(SineSkewedVonMises, np.array([0.342355])),
T(SineSkewedVonMisesBatched, np.array([[0.342355, -0.0001], [0.91, 0.09]])),
]
DISCRETE = [
T(dist.BetaBinomial, 2.0, 5.0, 10),
T(
dist.BetaBinomial,
np.array([2.0, 4.0]),
np.array([5.0, 3.0]),
np.array([10, 12]),
),
T(dist.BernoulliProbs, 0.2),
T(dist.BernoulliProbs, np.array([0.2, 0.7])),
T(dist.BernoulliLogits, np.array([-1.0, 3.0])),
T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([10, 2])),
T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([5, 8])),
T(dist.BinomialLogits, np.array([-1.0, 3.0]), np.array([5, 8])),
T(dist.CategoricalProbs, np.array([1.0])),
T(dist.CategoricalProbs, np.array([0.1, 0.5, 0.4])),
T(dist.CategoricalProbs, np.array([[0.1, 0.5, 0.4], [0.4, 0.4, 0.2]])),
T(dist.CategoricalLogits, np.array([-5.0])),
T(dist.CategoricalLogits, np.array([1.0, 2.0, -2.0])),
T(dist.CategoricalLogits, np.array([[-1, 2.0, 3.0], [3.0, -4.0, -2.0]])),
T(dist.Delta, 1),
T(dist.Delta, np.array([0.0, 2.0])),
T(dist.Delta, np.array([0.0, 2.0]), np.array([-2.0, -4.0])),
T(dist.DirichletMultinomial, np.array([1.0, 2.0, 3.9]), 10),
T(dist.DirichletMultinomial, np.array([0.2, 0.7, 1.1]), np.array([5, 5])),
T(dist.GammaPoisson, 2.0, 2.0),
T(dist.GammaPoisson, np.array([6.0, 2]), np.array([2.0, 8.0])),
T(dist.GeometricProbs, 0.2),
T(dist.GeometricProbs, np.array([0.2, 0.7])),
T(dist.GeometricLogits, np.array([-1.0, 3.0])),
T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), 10),
T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), np.array([5, 8])),
T(dist.MultinomialLogits, np.array([-1.0, 3.0]), np.array([[5], [8]])),
T(dist.NegativeBinomialProbs, 10, 0.2),
T(dist.NegativeBinomialProbs, 10, np.array([0.2, 0.6])),
T(dist.NegativeBinomialProbs, np.array([4.2, 10.7, 2.1]), 0.2),
T(
dist.NegativeBinomialProbs,
np.array([4.2, 10.7, 2.1]),
np.array([0.2, 0.6, 0.5]),
),
T(dist.NegativeBinomialLogits, 10, -2.1),
T(dist.NegativeBinomialLogits, 10, np.array([-5.2, 2.1])),
T(dist.NegativeBinomialLogits, np.array([4.2, 10.7, 2.1]), -5.2),
T(
dist.NegativeBinomialLogits,
np.array([4.2, 7.7, 2.1]),
np.array([4.2, 0.7, 2.1]),
),
T(dist.NegativeBinomial2, 0.3, 10),
T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), 10),
T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), np.array([10.2, 20.7, 2.1])),
T(dist.OrderedLogistic, -2, np.array([-10.0, 4.0, 9.0])),
T(dist.OrderedLogistic, np.array([-4, 3, 4, 5]), np.array([-1.5])),
T(dist.DiscreteUniform, -2, np.array([-1.0, 4.0, 9.0])),
T(dist.DiscreteUniform, np.array([-4, 3, 4, 5]), np.array([6])),
T(dist.Poisson, 2.0),
T(dist.Poisson, np.array([2.0, 3.0, 5.0])),
T(SparsePoisson, 2.0),
T(SparsePoisson, np.array([2.0, 3.0, 5.0])),
T(SparsePoisson, 2),
T(dist.ZeroInflatedPoisson, 0.6, 2.0),
T(dist.ZeroInflatedPoisson, np.array([0.2, 0.7, 0.3]), np.array([2.0, 3.0, 5.0])),
T(ZeroInflatedPoissonLogits, 2.0, 3.0),
T(
ZeroInflatedPoissonLogits,
np.array([0.2, 4.0, 0.3]),
np.array([2.0, -3.0, 5.0]),
),
]
def _is_batched_multivariate(jax_dist):
return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0
def gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):
eps = 1e-6
if constraint is constraints.boolean:
return random.bernoulli(key, shape=size)
elif isinstance(constraint, constraints.greater_than):
return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps
elif isinstance(constraint, constraints.integer_interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.randint(key, size, lower_bound, upper_bound + 1)
elif isinstance(constraint, constraints.integer_greater_than):
return constraint.lower_bound + random.poisson(key, np.array(5), shape=size)
elif isinstance(constraint, constraints.interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.uniform(key, size, minval=lower_bound, maxval=upper_bound)
elif constraint in (constraints.real, constraints.real_vector):
return random.normal(key, size)
elif constraint is constraints.simplex:
return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])
elif isinstance(constraint, constraints.multinomial):
n = size[-1]
return multinomial(
key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]
)
elif constraint is constraints.corr_cholesky:
return signed_stick_breaking_tril(
random.uniform(
key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1
)
)
elif constraint is constraints.corr_matrix:
cholesky = signed_stick_breaking_tril(
random.uniform(
key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1
)
)
return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))
elif constraint is constraints.lower_cholesky:
return jnp.tril(random.uniform(key, size))
elif constraint is constraints.positive_definite:
x = random.normal(key, size)
return jnp.matmul(x, jnp.swapaxes(x, -2, -1))
elif constraint is constraints.ordered_vector:
x = jnp.cumsum(random.exponential(key, size), -1)
return x - random.normal(key, size[:-1] + (1,))
elif isinstance(constraint, constraints.independent):
return gen_values_within_bounds(constraint.base_constraint, size, key)
elif constraint is constraints.sphere:
x = random.normal(key, size)
        return x / jnp.linalg.norm(x, axis=-1, keepdims=True)
elif constraint is constraints.l1_ball:
key1, key2 = random.split(key)
sign = random.bernoulli(key1)
bounds = [0, (-1) ** sign * 0.5]
        return random.uniform(key2, size, float, *sorted(bounds))
else:
raise NotImplementedError("{} not implemented.".format(constraint))
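# Illustrative sketch (helper, not collected by pytest): values returned by
# gen_values_within_bounds should satisfy the very constraint they were drawn
# for; the interval bounds and shape below are arbitrary examples.
def _example_gen_values_within_bounds():
    constraint = constraints.interval(-3.0, 5.0)
    values = gen_values_within_bounds(constraint, (2, 3))
    assert constraint(values).all()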
def gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):
if constraint is constraints.boolean:
return random.bernoulli(key, shape=size) - 2
elif isinstance(constraint, constraints.greater_than):
return constraint.lower_bound - jnp.exp(random.normal(key, size))
elif isinstance(constraint, constraints.integer_interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
return random.randint(key, size, lower_bound - 1, lower_bound)
elif isinstance(constraint, constraints.integer_greater_than):
return constraint.lower_bound - random.poisson(key, np.array(5), shape=size)
elif isinstance(constraint, constraints.interval):
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.uniform(key, size, minval=upper_bound, maxval=upper_bound + 1.0)
elif constraint in [constraints.real, constraints.real_vector]:
return lax.full(size, np.nan)
elif constraint is constraints.simplex:
return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]) + 1e-2
elif isinstance(constraint, constraints.multinomial):
n = size[-1]
return (
multinomial(
key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]
)
+ 1
)
elif constraint is constraints.corr_cholesky:
return (
signed_stick_breaking_tril(
random.uniform(
key,
size[:-2] + (size[-1] * (size[-1] - 1) // 2,),
minval=-1,
maxval=1,
)
)
+ 1e-2
)
elif constraint is constraints.corr_matrix:
cholesky = 1e-2 + signed_stick_breaking_tril(
random.uniform(
key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1
)
)
return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))
elif constraint is constraints.lower_cholesky:
return random.uniform(key, size)
elif constraint is constraints.positive_definite:
return random.normal(key, size)
elif constraint is constraints.ordered_vector:
x = jnp.cumsum(random.exponential(key, size), -1)
return x[..., ::-1]
elif isinstance(constraint, constraints.independent):
return gen_values_outside_bounds(constraint.base_constraint, size, key)
elif constraint is constraints.sphere:
x = random.normal(key, size)
x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)
return 2 * x
elif constraint is constraints.l1_ball:
key1, key2 = random.split(key)
sign = random.bernoulli(key1)
bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]
        return random.uniform(key2, size, float, *sorted(bounds))
else:
raise NotImplementedError("{} not implemented.".format(constraint))
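# Companion sketch (helper, not collected by pytest): values returned by
# gen_values_outside_bounds should violate the constraint everywhere.
def _example_gen_values_outside_bounds():
    constraint = constraints.positive
    values = gen_values_outside_bounds(constraint, (4,))
    assert not constraint(values).any()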
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
@pytest.mark.parametrize("prepend_shape", [(), (2,), (2, 3)])
def test_dist_shape(jax_dist, sp_dist, params, prepend_shape):
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
expected_shape = prepend_shape + jax_dist.batch_shape + jax_dist.event_shape
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert isinstance(samples, jnp.ndarray)
assert jnp.shape(samples) == expected_shape
if (
sp_dist
and not _is_batched_multivariate(jax_dist)
and not isinstance(jax_dist, dist.MultivariateStudentT)
):
sp_dist = sp_dist(*params)
sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)
assert jnp.shape(sp_samples) == expected_shape
elif (
sp_dist
and not _is_batched_multivariate(jax_dist)
and isinstance(jax_dist, dist.MultivariateStudentT)
):
sp_dist = sp_dist(*params)
size_ = prepend_shape + jax_dist.batch_shape
        size = (1,) if size_ == () else size_
try:
sp_samples = sp_dist.rvs(size=size)
except ValueError:
pytest.skip("scipy multivariate t doesn't support size with > 1 element")
assert jnp.shape(sp_samples) == expected_shape
if isinstance(jax_dist, (dist.MultivariateNormal, dist.MultivariateStudentT)):
assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2
assert_allclose(
jax_dist.precision_matrix,
jnp.linalg.inv(jax_dist.covariance_matrix),
rtol=1e-6,
)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_infer_shapes(jax_dist, sp_dist, params):
shapes = tuple(getattr(p, "shape", ()) for p in params)
shapes = tuple(x() if callable(x) else x for x in shapes)
jax_dist = jax_dist(*params)
try:
expected_batch_shape, expected_event_shape = type(jax_dist).infer_shapes(
*shapes
)
except NotImplementedError:
pytest.skip(f"{type(jax_dist).__name__}.infer_shapes() is not implemented")
assert jax_dist.batch_shape == expected_batch_shape
assert jax_dist.event_shape == expected_event_shape
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_has_rsample(jax_dist, sp_dist, params):
jax_dist = jax_dist(*params)
masked_dist = jax_dist.mask(False)
indept_dist = jax_dist.expand_by([2]).to_event(1)
transf_dist = dist.TransformedDistribution(jax_dist, biject_to(constraints.real))
assert masked_dist.has_rsample == jax_dist.has_rsample
assert indept_dist.has_rsample == jax_dist.has_rsample
assert transf_dist.has_rsample == jax_dist.has_rsample
if jax_dist.has_rsample:
assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete
if isinstance(jax_dist, dist.TransformedDistribution):
assert jax_dist.base_dist.has_rsample
else:
assert set(jax_dist.arg_constraints) == set(jax_dist.reparametrized_params)
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.Normal):
masked_dist.rsample(random.PRNGKey(0))
indept_dist.rsample(random.PRNGKey(0))
transf_dist.rsample(random.PRNGKey(0))
else:
with pytest.raises(NotImplementedError):
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.BernoulliProbs):
with pytest.raises(NotImplementedError):
masked_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
indept_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
transf_dist.rsample(random.PRNGKey(0))
@pytest.mark.parametrize("batch_shape", [(), (4,), (3, 2)])
def test_unit(batch_shape):
log_factor = random.normal(random.PRNGKey(0), batch_shape)
d = dist.Unit(log_factor=log_factor)
x = d.sample(random.PRNGKey(1))
assert x.shape == batch_shape + (0,)
assert (d.log_prob(x) == log_factor).all()
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS)
def test_sample_gradient(jax_dist, sp_dist, params):
    # we have a pathwise gradient for the gamma sampler
gamma_derived_params = {
"Gamma": ["concentration"],
"Beta": ["concentration1", "concentration0"],
"BetaProportion": ["mean", "concentration"],
"Chi2": ["df"],
"Dirichlet": ["concentration"],
"InverseGamma": ["concentration"],
"LKJ": ["concentration"],
"LKJCholesky": ["concentration"],
"StudentT": ["df"],
}.get(jax_dist.__name__, [])
dist_args = [
p
for p in (
inspect.getfullargspec(jax_dist.__init__)[0][1:]
if inspect.isclass(jax_dist)
            # account for the case where jax_dist is a function
else inspect.getfullargspec(jax_dist)[0]
)
]
params_dict = dict(zip(dist_args[: len(params)], params))
jax_class = type(jax_dist(**params_dict))
reparametrized_params = [
p for p in jax_class.reparametrized_params if p not in gamma_derived_params
]
if not reparametrized_params:
pytest.skip("{} not reparametrized.".format(jax_class.__name__))
nonrepara_params_dict = {
k: v for k, v in params_dict.items() if k not in reparametrized_params
}
repara_params = tuple(
v for k, v in params_dict.items() if k in reparametrized_params
)
rng_key = random.PRNGKey(0)
def fn(args):
args_dict = dict(zip(reparametrized_params, args))
return jnp.sum(
jax_dist(**args_dict, **nonrepara_params_dict).sample(key=rng_key)
)
actual_grad = jax.grad(fn)(repara_params)
assert len(actual_grad) == len(repara_params)
eps = 1e-3
for i in range(len(repara_params)):
if repara_params[i] is None:
continue
args_lhs = [p if j != i else p - eps for j, p in enumerate(repara_params)]
args_rhs = [p if j != i else p + eps for j, p in enumerate(repara_params)]
fn_lhs = fn(args_lhs)
fn_rhs = fn(args_rhs)
# finite diff approximation
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])
assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02, atol=0.03)
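# Minimal sketch of the reparametrization property exercised above, using only
# dist.Normal: a Normal sample is loc + scale * eps, so its gradient w.r.t. loc
# is exactly 1 and can be checked against jax.grad directly.
def _example_reparametrized_gradient():
    key = random.PRNGKey(0)
    f = lambda loc: dist.Normal(loc, 1.0).sample(key)  # noqa: E731
    assert_allclose(jax.grad(f)(0.5), 1.0)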
@pytest.mark.parametrize(
"jax_dist, params",
[
(dist.Gamma, (1.0,)),
(dist.Gamma, (0.1,)),
(dist.Gamma, (10.0,)),
(dist.Chi2, (1.0,)),
(dist.Chi2, (0.1,)),
(dist.Chi2, (10.0,)),
(dist.Beta, (1.0, 1.0)),
(dist.StudentT, (5.0, 2.0, 4.0)),
],
)
def test_pathwise_gradient(jax_dist, params):
rng_key = random.PRNGKey(0)
N = 1000000
def f(params):
z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))
return (z + z**2).mean(0)
def g(params):
d = jax_dist(*params)
return d.mean + d.variance + d.mean**2
actual_grad = grad(f)(params)
expected_grad = grad(g)(params)
assert_allclose(actual_grad, expected_grad, rtol=0.005)
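# The closed form in g follows from E[z + z**2] = E[z] + E[z**2]
# = mean + (variance + mean**2), so the Monte Carlo pathwise gradient of f
# should agree with the analytic gradient of g up to sampling error.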
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_jit_log_likelihood(jax_dist, sp_dist, params):
if jax_dist.__name__ in (
"EulerMaruyama",
"GaussianRandomWalk",
"_ImproperWrapper",
"LKJ",
"LKJCholesky",
"_SparseCAR",
):
pytest.xfail(reason="non-jittable params")
rng_key = random.PRNGKey(0)
samples = jax_dist(*params).sample(key=rng_key, sample_shape=(2, 3))
def log_likelihood(*params):
return jax_dist(*params).log_prob(samples)
expected = log_likelihood(*params)
actual = jax.jit(log_likelihood)(*params)
assert_allclose(actual, expected, atol=2e-5, rtol=2e-5)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
@pytest.mark.parametrize("prepend_shape", [(), (2,), (2, 3)])
@pytest.mark.parametrize("jit", [False, True])
def test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):
jit_fn = _identity if not jit else jax.jit
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert jax_dist.log_prob(samples).shape == prepend_shape + jax_dist.batch_shape
truncated_dists = (
dist.LeftTruncatedDistribution,
dist.RightTruncatedDistribution,
dist.TwoSidedTruncatedDistribution,
)
if sp_dist is None:
if isinstance(jax_dist, truncated_dists):
if isinstance(params[0], dist.Distribution):
# new api
loc, scale, low, high = (
params[0].loc,
params[0].scale,
params[1],
params[2],
)
else:
# old api
loc, scale, low, high = params
if low is None:
low = -np.inf
if high is None:
high = np.inf
sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)
expected = sp_dist.logpdf(samples) - jnp.log(
sp_dist.cdf(high) - sp_dist.cdf(low)
)
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)
return
pytest.skip("no corresponding scipy distn.")
if _is_batched_multivariate(jax_dist):
pytest.skip("batching not allowed in multivariate distns.")
if jax_dist.event_shape and prepend_shape:
# >>> d = sp.dirichlet([1.1, 1.1])
# >>> samples = d.rvs(size=(2,))
# >>> d.logpdf(samples)
# ValueError: The input vector 'x' must lie within the normal simplex ...
pytest.skip("batched samples cannot be scored by multivariate distributions.")
sp_dist = sp_dist(*params)
try:
expected = sp_dist.logpdf(samples)
except AttributeError:
expected = sp_dist.logpmf(samples)
except ValueError as e:
# precision issue: jnp.sum(x / jnp.sum(x)) = 0.99999994 != 1
if "The input vector 'x' must lie within the normal simplex." in str(e):
samples = jax.device_get(samples).astype("float64")
samples = samples / samples.sum(axis=-1, keepdims=True)
expected = sp_dist.logpdf(samples)
else:
raise e
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)
def test_mixture_log_prob():
gmm = dist.MixtureSameFamily(
dist.Categorical(logits=np.zeros(2)), dist.Normal(0, 1).expand([2])
)
actual = gmm.log_prob(0.0)
expected = dist.Normal(0, 1).log_prob(0.0)
assert_allclose(actual, expected)
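# The check above is the equal-weights, identical-components case of the general
# mixture identity log p(x) = logsumexp_k(log w_k + log p_k(x)). A direct sketch
# of the general case (weights and locations below are arbitrary):
def _example_mixture_identity():
    log_w = jnp.log(jnp.array([0.3, 0.7]))
    comps = dist.Normal(jnp.array([-1.0, 2.0]), 1.0)
    mix = dist.MixtureSameFamily(dist.Categorical(probs=jnp.exp(log_w)), comps)
    expected = logsumexp(log_w + comps.log_prob(0.5))
    assert_allclose(mix.log_prob(0.5), expected, rtol=1e-6)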
@pytest.mark.parametrize(
"jax_dist, sp_dist, params",
# TODO: add more complete pattern for Discrete.cdf
CONTINUOUS + [T(dist.Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))],
)
@pytest.mark.filterwarnings("ignore:overflow encountered:RuntimeWarning")
def test_cdf_and_icdf(jax_dist, sp_dist, params):
d = jax_dist(*params)
if d.event_dim > 0:
pytest.skip("skip testing cdf/icdf methods of multivariate distributions")
samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))
quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())
try:
rtol = 2e-3 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-5
if d.shape() == () and not d.is_discrete:
assert_allclose(
jax.vmap(jax.grad(d.cdf))(samples),
jnp.exp(d.log_prob(samples)),
atol=1e-5,
rtol=rtol,
)
assert_allclose(
jax.vmap(jax.grad(d.icdf))(quantiles),
jnp.exp(-d.log_prob(d.icdf(quantiles))),
atol=1e-5,
rtol=rtol,
)
assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-5, rtol=1e-5)
assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-5, rtol=rtol)
except NotImplementedError:
pass
# test against scipy
if not sp_dist:
pytest.skip("no corresponding scipy distn.")
sp_dist = sp_dist(*params)
try:
actual_cdf = d.cdf(samples)
expected_cdf = sp_dist.cdf(samples)
assert_allclose(actual_cdf, expected_cdf, atol=1e-5, rtol=1e-5)
actual_icdf = d.icdf(quantiles)
expected_icdf = sp_dist.ppf(quantiles)
assert_allclose(actual_icdf, expected_icdf, atol=1e-4, rtol=1e-4)
except NotImplementedError:
pass
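# The gradient checks in test_cdf_and_icdf encode the inverse function theorem:
# d/dx cdf(x) = exp(log_prob(x)), and since icdf inverts cdf,
# d/dq icdf(q) = 1 / pdf(icdf(q)) = exp(-log_prob(icdf(q))).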
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS + DIRECTIONAL)
def test_gof(jax_dist, sp_dist, params):
if "Improper" in jax_dist.__name__:
pytest.skip("distribution has improper .log_prob()")
if "LKJ" in jax_dist.__name__:
pytest.xfail("incorrect submanifold scaling")
if jax_dist is dist.EulerMaruyama:
d = jax_dist(*params)
if d.event_dim > 1:
pytest.skip("EulerMaruyama skip test when event shape is non-trivial.")
num_samples = 10000
if "BetaProportion" in jax_dist.__name__:
num_samples = 20000
rng_key = random.PRNGKey(0)
d = jax_dist(*params)
samples = d.sample(key=rng_key, sample_shape=(num_samples,))
probs = np.exp(d.log_prob(samples))
dim = None
if jax_dist is dist.ProjectedNormal:
dim = samples.shape[-1] - 1
# Test each batch independently.
probs = probs.reshape(num_samples, -1)
samples = samples.reshape(probs.shape + d.event_shape)
if "Dirichlet" in jax_dist.__name__:
# The Dirichlet density is over all but one of the probs.
samples = samples[..., :-1]
for b in range(probs.shape[1]):
try:
gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)
except InvalidTest:
pytest.skip("expensive test")
else:
assert gof > TEST_FAILURE_RATE
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS + DISCRETE)
def test_independent_shape(jax_dist, sp_dist, params):
d = jax_dist(*params)
batch_shape, event_shape = d.batch_shape, d.event_shape
shape = batch_shape + event_shape
for i in range(len(batch_shape)):
indep = dist.Independent(d, reinterpreted_batch_ndims=i)
sample = indep.sample(random.PRNGKey(0))
event_boundary = len(shape) - len(event_shape) - i
assert indep.batch_shape == shape[:event_boundary]
assert indep.event_shape == shape[event_boundary:]
assert jnp.shape(indep.log_prob(sample)) == shape[:event_boundary]
def _tril_cholesky_to_tril_corr(x):
w = vec_to_tril_matrix(x, diagonal=-1)
diag = jnp.sqrt(1 - jnp.sum(w**2, axis=-1))
cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])
corr = jnp.matmul(cholesky, cholesky.T)
return matrix_to_tril_vec(corr, diagonal=-1)
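# _tril_cholesky_to_tril_corr maps the strictly-lower-triangular part of a
# correlation Cholesky factor to the strictly-lower-triangular part of the
# corresponding correlation matrix; the log-determinant of its Jacobian is the
# change-of-variables term used below to convert densities over Cholesky
# factors into densities over correlation matrices.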
@pytest.mark.parametrize("dimension", [2, 3, 5])
def test_log_prob_LKJCholesky_uniform(dimension):
# When concentration=1, the distribution of correlation matrices is uniform.
# We will test that fact here.
d = dist.LKJCholesky(dimension=dimension, concentration=1)
N = 5
corr_log_prob = []
for i in range(N):
sample = d.sample(random.PRNGKey(i))
log_prob = d.log_prob(sample)
sample_tril = matrix_to_tril_vec(sample, diagonal=-1)
cholesky_to_corr_jac = np.linalg.slogdet(
jax.jacobian(_tril_cholesky_to_tril_corr)(sample_tril)
)[1]
corr_log_prob.append(log_prob - cholesky_to_corr_jac)
corr_log_prob = np.array(corr_log_prob)
# test if they are constant
assert_allclose(
corr_log_prob,
jnp.broadcast_to(corr_log_prob[0], corr_log_prob.shape),
rtol=1e-6,
)
if dimension == 2:
        # when concentration = 1, LKJ gives a uniform distribution over correlation
        # matrices, hence for dimension = 2 the density of the single off-diagonal
        # entry is that of Uniform(-1, 1), i.e. 0.5.
        # In addition, the Jacobian of the cholesky -> corr transformation is 1
        # (hence its log value is 0) because the off-diagonal lower-triangular
        # entry does not change in the transform.
        # So target_log_prob = log(0.5)
assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-6)
@pytest.mark.parametrize("dimension", [2, 3, 5])
@pytest.mark.parametrize("concentration", [0.6, 2.2])
def test_log_prob_LKJCholesky(dimension, concentration):
    # We will test against the fact that LKJCorrCholesky can be seen as a
    # TransformedDistribution whose base distribution is a distribution of partial
    # correlations in the C-vine method (modulo an affine transform to change domain
    # from (0, 1) to (-1, 1)) and whose transform is a signed stick-breaking process.
d = dist.LKJCholesky(dimension, concentration, sample_method="cvine")
beta_sample = d._beta.sample(random.PRNGKey(0))
beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))
partial_correlation = 2 * beta_sample - 1
affine_logdet = beta_sample.shape[-1] * jnp.log(2)
sample = signed_stick_breaking_tril(partial_correlation)
# compute signed stick breaking logdet
inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2 # noqa: E731
inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(partial_correlation)))
unconstrained = inv_tanh(partial_correlation)
corr_cholesky_logdet = biject_to(constraints.corr_cholesky).log_abs_det_jacobian(
unconstrained, sample
)
signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet
actual_log_prob = d.log_prob(sample)
expected_log_prob = beta_log_prob - affine_logdet - signed_stick_breaking_logdet
assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-5)
assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-6)
def test_zero_inflated_logits_probs_agree():
concentration = np.exp(np.random.normal(1))
rate = np.exp(np.random.normal(1))
d = dist.GammaPoisson(concentration, rate)
gate_logits = np.random.normal(0)
gate_probs = expit(gate_logits)
zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)
zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)
sample = np.random.randint(
0,
20,
(
1000,
100,
),
)
assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))
@pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
def test_ZIP_log_prob(rate):
# if gate is 0 ZIP is Poisson
zip_ = dist.ZeroInflatedPoisson(0.0, rate)
pois = dist.Poisson(rate)
s = zip_.sample(random.PRNGKey(0), (20,))
zip_prob = zip_.log_prob(s)
pois_prob = pois.log_prob(s)
assert_allclose(zip_prob, pois_prob, rtol=1e-6)
# if gate is 1 ZIP is Delta(0)
zip_ = dist.ZeroInflatedPoisson(1.0, rate)
delta = dist.Delta(0.0)
s = np.array([0.0, 1.0])
zip_prob = zip_.log_prob(s)
delta_prob = delta.log_prob(s)
assert_allclose(zip_prob, delta_prob, rtol=1e-6)
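# For an interior gate, the zero-inflated pmf is the two-component mixture
# p(x) = gate * 1[x == 0] + (1 - gate) * Poisson(rate).pmf(x); a direct sketch
# (gate and rate below are arbitrary):
def _example_zip_mixture_identity():
    gate, rate = 0.3, 2.0
    zip_ = dist.ZeroInflatedPoisson(gate, rate)
    s = jnp.array([0.0, 1.0, 5.0])
    expected = jnp.log(
        gate * (s == 0) + (1 - gate) * jnp.exp(dist.Poisson(rate).log_prob(s))
    )
    assert_allclose(zip_.log_prob(s), expected, rtol=1e-5)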
@pytest.mark.parametrize("total_count", [1, 2, 3, 10])
@pytest.mark.parametrize("shape", [(1,), (3, 1), (2, 3, 1)])
def test_beta_binomial_log_prob(total_count, shape):
concentration0 = np.exp(np.random.normal(size=shape))
concentration1 = np.exp(np.random.normal(size=shape))
value = jnp.arange(1 + total_count)
num_samples = 100000
probs = np.random.beta(concentration1, concentration0, size=(num_samples,) + shape)
log_probs = dist.Binomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.BetaBinomial(concentration1, concentration0, total_count).log_prob(
value
)
assert_allclose(actual, expected, rtol=0.02)
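# The pattern above (and in the two compound-distribution tests below) is Monte
# Carlo marginalization: log E_p[pmf(x | p)] is approximated by
# logsumexp_i(log_prob_i) - log(num_samples) over draws p_i of the latent
# parameter, which should converge to the analytic compound log-pmf.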
@pytest.mark.parametrize("total_count", [1, 2, 3, 10])
@pytest.mark.parametrize("batch_shape", [(1,), (3, 1), (2, 3, 1)])
def test_dirichlet_multinomial_log_prob(total_count, batch_shape):
event_shape = (3,)
concentration = np.exp(np.random.normal(size=batch_shape + event_shape))
# test on one-hots
value = total_count * jnp.eye(event_shape[-1]).reshape(
event_shape + (1,) * len(batch_shape) + event_shape
)
num_samples = 100000
probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (num_samples, 1))
log_probs = dist.Multinomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.DirichletMultinomial(concentration, total_count).log_prob(value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize("shape", [(1,), (3, 1), (2, 3, 1)])
def test_gamma_poisson_log_prob(shape):
gamma_conc = np.exp(np.random.normal(size=shape))
gamma_rate = np.exp(np.random.normal(size=shape))
value = jnp.arange(15)
num_samples = 300000
poisson_rate = np.random.gamma(
gamma_conc, 1 / gamma_rate, size=(num_samples,) + shape
)
log_probs = dist.Poisson(poisson_rate).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_log_prob_gradient(jax_dist, sp_dist, params):
if jax_dist in [dist.LKJ, dist.LKJCholesky]:
pytest.skip("we have separated tests for LKJCholesky distribution")
if jax_dist is _ImproperWrapper:
pytest.skip("no param for ImproperUniform to test for log_prob gradient")
rng_key = random.PRNGKey(0)
value = jax_dist(*params).sample(rng_key)
def fn(*args):
return jnp.sum(jax_dist(*args).log_prob(value))
eps = 1e-3
for i in range(len(params)):
if jax_dist is dist.EulerMaruyama and i == 1:
# skip taking grad w.r.t. sde_fn
continue
if jax_dist is _SparseCAR and i == 3:
# skip taking grad w.r.t. adj_matrix
continue
if isinstance(
params[i], dist.Distribution
): # skip taking grad w.r.t. base_dist
continue
if params[i] is None or jnp.result_type(params[i]) in (jnp.int32, jnp.int64):
continue
actual_grad = jax.grad(fn, i)(*params)
args_lhs = [p if j != i else p - eps for j, p in enumerate(params)]
args_rhs = [p if j != i else p + eps for j, p in enumerate(params)]
fn_lhs = fn(*args_lhs)
fn_rhs = fn(*args_rhs)
# finite diff approximation
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad) == jnp.shape(params[i])
if i == 0 and jax_dist is dist.Delta:
# grad w.r.t. `value` of Delta distribution will be 0
# but numerical value will give nan (= inf - inf)
expected_grad = 0.0
assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01, atol=0.01)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_mean_var(jax_dist, sp_dist, params):
    if jax_dist is _ImproperWrapper:
        pytest.skip("Improper distribution does not have mean/var implemented")
    if jax_dist is FoldedNormal:
        pytest.skip("Folded distribution does not have mean/var implemented")
    if jax_dist is dist.EulerMaruyama:
        pytest.skip("EulerMaruyama distribution does not have mean/var implemented")
    if jax_dist is dist.RelaxedBernoulliLogits:
        pytest.skip("RelaxedBernoulli distribution does not have mean/var implemented")
if "SineSkewed" in jax_dist.__name__:
pytest.skip("Skewed Distribution are not symmetric about location.")
if jax_dist in (
_TruncatedNormal,
_TruncatedCauchy,
dist.LeftTruncatedDistribution,
dist.RightTruncatedDistribution,
dist.TwoSidedTruncatedDistribution,
):
pytest.skip("Truncated distributions do not has mean/var implemented")
if jax_dist is dist.ProjectedNormal:
pytest.skip("Mean is defined in submanifold")
n = (
20000
if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.SineBivariateVonMises]
else 200000
)
d_jax = jax_dist(*params)
k = random.PRNGKey(0)
samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)
# check with suitable scipy implementation if available
# XXX: VonMises is already tested below
if (
sp_dist
and not _is_batched_multivariate(d_jax)
and jax_dist
not in [dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]
):
d_sp = sp_dist(*params)
try:
sp_mean = d_sp.mean()
except TypeError: # mvn does not have .mean() method
sp_mean = d_sp.mean
# for multivariate distns try .cov first
if d_jax.event_shape:
try:
sp_var = jnp.diag(d_sp.cov())
except TypeError: # mvn does not have .cov() method
sp_var = jnp.diag(d_sp.cov)
except AttributeError:
sp_var = d_sp.var()
else:
sp_var = d_sp.var()
assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-7)
assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-7)
if jnp.all(jnp.isfinite(sp_mean)):
assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)
if jnp.all(jnp.isfinite(sp_var)):
assert_allclose(
jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2
)
elif jax_dist in [dist.LKJ, dist.LKJCholesky]:
if jax_dist is dist.LKJCholesky:
corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))
else:
corr_samples = samples
dimension, concentration, _ = params
# marginal of off-diagonal entries
marginal = dist.Beta(
concentration + 0.5 * (dimension - 2), concentration + 0.5 * (dimension - 2)
)
# scale statistics due to linear mapping
marginal_mean = 2 * marginal.mean - 1
marginal_std = 2 * jnp.sqrt(marginal.variance)
expected_mean = jnp.broadcast_to(
jnp.reshape(marginal_mean, jnp.shape(marginal_mean) + (1, 1)),
jnp.shape(marginal_mean) + d_jax.event_shape,
)
expected_std = jnp.broadcast_to(
jnp.reshape(marginal_std, jnp.shape(marginal_std) + (1, 1)),
jnp.shape(marginal_std) + d_jax.event_shape,
)
# diagonal elements of correlation matrices are 1
expected_mean = expected_mean * (1 - jnp.identity(dimension)) + jnp.identity(
dimension
)
expected_std = expected_std * (1 - jnp.identity(dimension))
assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol=0.01)
assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)
elif jax_dist in [dist.VonMises]:
# circular mean = sample mean
assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=1e-2)
# circular variance
x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)
expected_variance = 1 - jnp.sqrt(x**2 + y**2)
assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=1e-2)
elif jax_dist in [dist.SineBivariateVonMises]:
phi_loc = _circ_mean(samples[..., 0])
psi_loc = _circ_mean(samples[..., 1])
assert_allclose(
d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1), rtol=0.05, atol=1e-2
)
elif jax_dist in [dist.MatrixNormal]:
sample_shape = (200_000,)
# use X ~ MN(loc, U, V) then vec(X) ~ MVN(vec(loc), kron(V, U))
if len(d_jax.batch_shape) > 0:
axes = [len(sample_shape) + i for i in range(len(d_jax.batch_shape))]
axes = tuple(axes)
samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))
subshape = samples_re.shape[: len(axes)]
ixi = product(*[range(k) for k in subshape])
for ix in ixi:
# mean
def get_min_shape(ix, batch_shape):
return min(ix, tuple(map(lambda x: x - 1, batch_shape)))
ix_loc = get_min_shape(ix, d_jax.loc.shape[: len(ix)])
                assert jnp.allclose(
                    jnp.mean(samples_re[ix], 0),
                    jnp.squeeze(d_jax.mean[ix_loc]),
                    rtol=0.5,
                    atol=1e-2,
                )
# cov
samples_mvn = jnp.squeeze(samples_re[ix]).reshape(
sample_shape + (-1,), order="F"
)
ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[: len(ix)])
ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[: len(ix)])
scale_tril = my_kron(
d_jax.scale_tril_column[ix_col],
d_jax.scale_tril_row[ix_row],
)
sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))
                assert jnp.allclose(
                    sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2
                )
else: # unbatched
# mean
            assert jnp.allclose(
                jnp.mean(samples, 0),
                jnp.squeeze(d_jax.mean),
                rtol=0.5,
                atol=1e-2,
            )
# cov
samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,), order="F")
scale_tril = my_kron(
jnp.squeeze(d_jax.scale_tril_column), jnp.squeeze(d_jax.scale_tril_row)
)
sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))
            assert jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)
else:
if jnp.all(jnp.isfinite(d_jax.mean)):
assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)
if isinstance(d_jax, dist.CAR):
pytest.skip("CAR distribution does not have `variance` implemented.")
if isinstance(d_jax, dist.Gompertz):
pytest.skip("Gompertz distribution does not have `variance` implemented.")
if jnp.all(jnp.isfinite(d_jax.variance)):
assert_allclose(
jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2
)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
@pytest.mark.parametrize("prepend_shape", [(), (2,), (2, 3)])
def test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):
if jax_dist in (
_TruncatedNormal,
_TruncatedCauchy,
_GaussianMixture,
_Gaussian2DMixture,
_GeneralMixture,
_General2DMixture,
):
pytest.skip(f"{jax_dist.__name__} is a function, not a class")
dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]
valid_params, oob_params = list(params), list(params)
key = random.PRNGKey(1)
dependent_constraint = False
for i in range(len(params)):
if (
jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky)
and dist_args[i] != "concentration"
):
continue
if "SineSkewed" in jax_dist.__name__ and dist_args[i] != "skewness":
continue
if jax_dist is dist.EulerMaruyama and dist_args[i] != "t":
continue
if (
jax_dist is dist.TwoSidedTruncatedDistribution
and dist_args[i] == "base_dist"
):
continue
if jax_dist is dist.GaussianRandomWalk and dist_args[i] == "num_steps":
continue
if (
jax_dist is dist.SineBivariateVonMises
and dist_args[i] == "weighted_correlation"
):
continue
if params[i] is None:
oob_params[i] = None
valid_params[i] = None
continue
constraint = jax_dist.arg_constraints[dist_args[i]]
if isinstance(constraint, constraints._Dependent):
dependent_constraint = True
break
key, key_gen = random.split(key)
oob_params[i] = gen_values_outside_bounds(
constraint, jnp.shape(params[i]), key_gen
)
valid_params[i] = gen_values_within_bounds(
constraint, jnp.shape(params[i]), key_gen
)
if jax_dist is dist.MultivariateStudentT:
        # As the mean is only defined for df > 1 and we instantiate
        # scipy.stats.multivariate_t with the same mean as jax_dist,
        # we need to ensure the mean is defined, so force df > 1
valid_params[0] += 1
if jax_dist is dist.LogUniform:
        # scipy.stats.loguniform takes parameters a and b
        # with a > 0 and b > a.
        # gen_values_within_bounds() only guarantees
        # a > 0 and b > 0, so set b = a + b to ensure b > a.
valid_params[1] += valid_params[0]
assert jax_dist(*oob_params)
# Invalid parameter values throw ValueError
if not dependent_constraint and (
jax_dist is not _ImproperWrapper and "SineSkewed" not in jax_dist.__name__
):
with pytest.raises(ValueError):
jax_dist(*oob_params, validate_args=True)
with pytest.raises(ValueError):
# test error raised under jit omnistaging
oob_params = jax.device_get(oob_params)
def dist_gen_fn():
d = jax_dist(*oob_params, validate_args=True)
return d
jax.jit(dist_gen_fn)()
d = jax_dist(*valid_params, validate_args=True)
# Test agreement of log density evaluation on randomly generated samples
# with scipy's implementation when available.
if (
sp_dist
and not _is_batched_multivariate(d)
and not (d.event_shape and prepend_shape)
):
valid_samples = gen_values_within_bounds(
d.support, size=prepend_shape + d.batch_shape + d.event_shape
)
try:
expected = sp_dist(*valid_params).logpdf(valid_samples)
except AttributeError:
expected = sp_dist(*valid_params).logpmf(valid_samples)
assert_allclose(d.log_prob(valid_samples), expected, atol=1e-5, rtol=1e-5)
# Out of support samples throw ValueError
oob_samples = gen_values_outside_bounds(
d.support, size=prepend_shape + d.batch_shape + d.event_shape
)
with pytest.warns(UserWarning, match="Out-of-support"):
d.log_prob(oob_samples)
with pytest.warns(UserWarning, match="Out-of-support"):
# test warning work under jit omnistaging
oob_samples = jax.device_get(oob_samples)
valid_params = jax.device_get(valid_params)
def log_prob_fn():
d = jax_dist(*valid_params, validate_args=True)
return d.log_prob(oob_samples)
jax.jit(log_prob_fn)()
def test_omnistaging_invalid_param():
def f(x):
return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)
with pytest.raises(ValueError, match="got invalid"):
jax.jit(f)(0)
def test_omnistaging_invalid_sample():
def f(x):
return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)
with pytest.warns(UserWarning, match="Out-of-support"):
jax.jit(f)(0)
def test_categorical_log_prob_grad():
data = jnp.repeat(jnp.arange(3), 10)
def f(x):
return (
dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(data).sum()
)
def g(x):
return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data).sum()
x = 0.5
fx, grad_fx = jax.value_and_grad(f)(x)
gx, grad_gx = jax.value_and_grad(g)(x)
assert_allclose(fx, gx, rtol=1e-6)
assert_allclose(grad_fx, grad_gx, atol=1e-4)
def test_beta_proportion_invalid_mean():
with dist.distribution.validation_enabled(), pytest.raises(
ValueError, match=r"^BetaProportion distribution got invalid mean parameter\.$"
):
dist.BetaProportion(1.0, 1.0)
########################################
# Tests for constraints and transforms #
########################################
@pytest.mark.parametrize(
"constraint, x, expected",
[
(constraints.boolean, np.array([True, False]), np.array([True, True])),
(constraints.boolean, np.array([1, 1]), np.array([True, True])),
(constraints.boolean, np.array([-1, 1]), np.array([False, True])),
(
constraints.corr_cholesky,
np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),
np.array([True, False]),
), # NB: not lower_triangular
(
constraints.corr_cholesky,
np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),
np.array([False, False]),
), # NB: not positive_diagonal & not unit_norm_row
(
constraints.corr_matrix,
np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),
np.array([True, False]),
), # NB: not lower_triangular
(
constraints.corr_matrix,
np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),
np.array([False, False]),
), # NB: not unit diagonal
(constraints.greater_than(1), 3, True),
(
constraints.greater_than(1),
np.array([-1, 1, 5]),
np.array([False, False, True]),
),
(constraints.integer_interval(-3, 5), 0, True),
(
constraints.integer_interval(-3, 5),
np.array([-5, -3, 0, 1.1, 5, 7]),
np.array([False, True, True, False, True, False]),
),
(constraints.interval(-3, 5), 0, True),
(
constraints.interval(-3, 5),
np.array([-5, -3, 0, 5, 7]),
np.array([False, True, True, True, False]),
),
(constraints.less_than(1), -2, True),
(
constraints.less_than(1),
np.array([-1, 1, 5]),
np.array([True, False, False]),
),
(constraints.lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True),
(
constraints.lower_cholesky,
np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),
np.array([False, False]),
),
(constraints.nonnegative_integer, 3, True),
(
constraints.nonnegative_integer,
np.array([-1.0, 0.0, 5.0]),
np.array([False, True, True]),
),
(constraints.positive, 3, True),
(constraints.positive, np.array([-1, 0, 5]), np.array([False, False, True])),
(constraints.positive_definite, np.array([[1.0, 0.3], [0.3, 1.0]]), True),
(
constraints.positive_definite,
np.array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]),
np.array([False, False]),
),
(constraints.positive_integer, 3, True),
(
constraints.positive_integer,
np.array([-1.0, 0.0, 5.0]),
np.array([False, False, True]),
),
(constraints.real, -1, True),
(
constraints.real,
np.array([np.inf, -np.inf, np.nan, np.pi]),
np.array([False, False, False, True]),
),
(constraints.simplex, np.array([0.1, 0.3, 0.6]), True),
(
constraints.simplex,
np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),
np.array([True, False, False]),
),
(constraints.softplus_positive, 3, True),
(
constraints.softplus_positive,
np.array([-1, 0, 5]),
np.array([False, False, True]),
),
(
constraints.softplus_lower_cholesky,
np.array([[1.0, 0.0], [-2.0, 0.1]]),
True,
),
(
constraints.softplus_lower_cholesky,
np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),
np.array([False, False]),
),
(constraints.unit_interval, 0.1, True),
(
constraints.unit_interval,
np.array([-5, 0, 0.5, 1, 7]),
np.array([False, True, True, True, False]),
),
(
constraints.sphere,
np.array([[1, 0, 0], [0.5, 0.5, 0]]),
np.array([True, False]),
),
(
constraints.open_interval(0.0, 1.0),
np.array([-5, 0, 0.5, 1, 7]),
np.array([False, False, True, False, False]),
),
],
)
def test_constraints(constraint, x, expected):
v = constraint.feasible_like(x)
if jnp.result_type(v) == "float32" or jnp.result_type(v) == "float64":
assert not constraint.is_discrete
assert_array_equal(constraint(x), expected)
feasible_value = constraint.feasible_like(x)
assert jnp.shape(feasible_value) == jnp.shape(x)
assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected), True))
try:
inverse = biject_to(constraint).inv(feasible_value)
except NotImplementedError:
pass
else:
assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-7)
@pytest.mark.parametrize(
"constraint",
[
constraints.corr_cholesky,
constraints.corr_matrix,
constraints.greater_than(2),
constraints.interval(-3, 5),
constraints.l1_ball,
constraints.less_than(1),
constraints.lower_cholesky,
constraints.scaled_unit_lower_cholesky,
constraints.ordered_vector,
constraints.positive,
constraints.positive_definite,
constraints.positive_ordered_vector,
constraints.real,
constraints.real_vector,
constraints.simplex,
constraints.softplus_positive,
constraints.softplus_lower_cholesky,
constraints.unit_interval,
constraints.open_interval(0.0, 1.0),
],
ids=lambda x: x.__class__,
)
@pytest.mark.parametrize("shape", [(), (1,), (3,), (6,), (3, 1), (1, 3), (5, 3)])
def test_biject_to(constraint, shape):
transform = biject_to(constraint)
event_dim = transform.domain.event_dim
if isinstance(constraint, constraints._Interval):
assert transform.codomain.upper_bound == constraint.upper_bound
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._GreaterThan):
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._LessThan):
assert transform.codomain.upper_bound == constraint.upper_bound
if len(shape) < event_dim:
return
rng_key = random.PRNGKey(0)
x = random.normal(rng_key, shape)
y = transform(x)
assert transform.forward_shape(x.shape) == y.shape
assert transform.inverse_shape(y.shape) == x.shape
    # test that inv works for NaN arrays:
x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))
assert x_nan.shape == x.shape
# test codomain
batch_shape = shape if event_dim == 0 else shape[:-1]
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=jnp.bool_))
# test inv
z = transform.inv(y)
assert_allclose(x, z, atol=1e-5, rtol=1e-5)
    # test domain; currently every domain is constraints.real or constraints.real_vector
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
# test log_abs_det_jacobian
actual = transform.log_abs_det_jacobian(x, y)
assert jnp.shape(actual) == batch_shape
if len(shape) == event_dim:
if constraint is constraints.simplex:
expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)[:, :-1])[1]
elif constraint in [
constraints.real_vector,
constraints.ordered_vector,
constraints.positive_ordered_vector,
constraints.l1_ball,
]:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
elif constraint in [constraints.corr_cholesky, constraints.corr_matrix]:
vec_transform = lambda x: matrix_to_tril_vec( # noqa: E731
transform(x), diagonal=-1
)
y_tril = matrix_to_tril_vec(y, diagonal=-1)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y, diagonal=-1)
if constraint is constraints.corr_matrix:
# fill the upper triangular part
matrix = (
matrix
+ jnp.swapaxes(matrix, -2, -1)
+ jnp.identity(matrix.shape[-1])
)
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]
elif constraint in [
constraints.lower_cholesky,
constraints.scaled_unit_lower_cholesky,
constraints.positive_definite,
constraints.softplus_lower_cholesky,
]:
vec_transform = lambda x: matrix_to_tril_vec(transform(x)) # noqa: E731
y_tril = matrix_to_tril_vec(y)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y)
if constraint is constraints.positive_definite:
# fill the upper triangular part
matrix = (
matrix
+ jnp.swapaxes(matrix, -2, -1)
- jnp.diag(jnp.diag(matrix))
)
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-5, rtol=1e-5)
assert_allclose(actual, -inv_expected, atol=1e-5, rtol=1e-5)
# NB: skip transforms which are tested in `test_biject_to`
@pytest.mark.parametrize(
"transform, event_shape",
[
(PermuteTransform(np.array([3, 0, 4, 1, 2])), (5,)),
(PowerTransform(2.0), ()),
(SoftplusTransform(), ()),
(
LowerCholeskyAffine(
np.array([1.0, 2.0]), np.array([[0.6, 0.0], [1.5, 0.4]])
),
(2,),
),
(
transforms.ComposeTransform(
[
biject_to(constraints.simplex),
SimplexToOrderedTransform(0.0),
biject_to(constraints.ordered_vector).inv,
]
),
(5,),
),
],
)
@pytest.mark.parametrize(
"batch_shape",
[
(),
(1,),
(3,),
(6,),
(3, 1),
(1, 3),
(5, 3),
],
)
def test_bijective_transforms(transform, event_shape, batch_shape):
shape = batch_shape + event_shape
rng_key = random.PRNGKey(0)
x = biject_to(transform.domain)(random.normal(rng_key, shape))
y = transform(x)
# test codomain
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))
# test inv
z = transform.inv(y)
assert_allclose(x, z, atol=1e-6, rtol=1e-4)
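    # .inv should round-trip to the original transform, and repeated access
    # should return the identical (cached) object: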
assert transform.inv.inv is transform
assert transform.inv is transform.inv
assert transform.domain is transform.inv.codomain
assert transform.codomain is transform.inv.domain
# test domain
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
# test log_abs_det_jacobian
actual = transform.log_abs_det_jacobian(x, y)
assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))
assert jnp.shape(actual) == batch_shape
if len(shape) == transform.domain.event_dim:
if len(event_shape) == 1:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-6)
assert_allclose(actual, -inv_expected, atol=1e-6)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
def test_composed_transform(batch_shape):
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
t = transforms.ComposeTransform([t1, t2, t1])
assert t.domain.event_dim == 1
assert t.codomain.event_dim == 2
x = np.random.normal(size=batch_shape + (6,))
y = t(x)
log_det = t.log_abs_det_jacobian(x, y)
assert log_det.shape == batch_shape
expected_log_det = (
jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2) + jnp.log(2) * 9
)
assert_allclose(log_det, expected_log_det)
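# The expected value decomposes per transform: the first AffineTransform scales
# a 6-vector (contributing 6 * log 2), t2 maps it to a 3x3 lower-triangular
# matrix with its own log-det term, and the final AffineTransform scales the
# 3x3 matrix event (contributing 9 * log 2); ComposeTransform sums these
# log_abs_det_jacobian terms along the intermediate values.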
@pytest.mark.parametrize("batch_shape", [(), (5,)])
def test_composed_transform_1(batch_shape):
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
t = transforms.ComposeTransform([t1, t2, t2])
assert t.domain.event_dim == 1
assert t.codomain.event_dim == 3
x = np.random.normal(size=batch_shape + (6,))
y = t(x)
log_det = t.log_abs_det_jacobian(x, y)
assert log_det.shape == batch_shape
z = t2(x * 2)
expected_log_det = (
jnp.log(2) * 6
+ t2.log_abs_det_jacobian(x * 2, z)
+ t2.log_abs_det_jacobian(z, t2(z)).sum(-1)
)
assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
def test_simplex_to_order_transform(batch_shape):
simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()
simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)
transform = SimplexToOrderedTransform()
out = transform(simplex)
assert out.shape == transform.forward_shape(simplex.shape)
assert simplex.shape == transform.inverse_shape(out.shape)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
@pytest.mark.parametrize("prepend_event_shape", [(), (4,)])
@pytest.mark.parametrize("sample_shape", [(), (7,)])
def test_transformed_distribution(batch_shape, prepend_event_shape, sample_shape):
base_dist = (
dist.Normal(0, 1)
.expand(batch_shape + prepend_event_shape + (6,))
.to_event(1 + len(prepend_event_shape))
)
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
d = dist.TransformedDistribution(base_dist, [t1, t2, t1])
assert d.event_dim == 2 + len(prepend_event_shape)
y = d.sample(random.PRNGKey(0), sample_shape)
t = transforms.ComposeTransform([t1, t2, t1])
x = t.inv(y)
assert x.shape == sample_shape + base_dist.shape()
log_prob = d.log_prob(y)
assert log_prob.shape == sample_shape + batch_shape
t_log_det = t.log_abs_det_jacobian(x, y)
if prepend_event_shape:
t_log_det = t_log_det.sum(-1)
expected_log_prob = base_dist.log_prob(x) - t_log_det
assert_allclose(log_prob, expected_log_prob, atol=1e-5)
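# This is the standard change of variables: for y = t(x) with x ~ base_dist,
# log p_Y(y) = log p_X(t.inv(y)) - log|det J_t(t.inv(y))|, with the Jacobian
# term summed over any extra event dimensions added by to_event.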
@pytest.mark.parametrize(
"transformed_dist",
[
dist.TransformedDistribution(
dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()
),
dist.TransformedDistribution(
dist.Exponential(jnp.ones(2)),
[
transforms.PowerTransform(0.7),
transforms.AffineTransform(0.0, jnp.ones(2) * 3),
],
),
],
)
def test_transformed_distribution_intermediates(transformed_dist):
sample, intermediates = transformed_dist.sample_with_intermediates(
random.PRNGKey(1)
)
assert_allclose(
transformed_dist.log_prob(sample, intermediates),
transformed_dist.log_prob(sample),
)
def test_transformed_transformed_distribution():
loc, scale = -2, 3
dist1 = dist.TransformedDistribution(
dist.Normal(2, 3), transforms.PowerTransform(2.0)
)
dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(-2, 3))
assert isinstance(dist2.base_dist, dist.Normal)
assert len(dist2.transforms) == 2
assert isinstance(dist2.transforms[0], transforms.PowerTransform)
assert isinstance(dist2.transforms[1], transforms.AffineTransform)
rng_key = random.PRNGKey(0)
assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))
intermediates = dist2.sample_with_intermediates(rng_key)
assert len(intermediates) == 2
def _make_iaf(input_dim, hidden_dims, rng_key):
arn_init, arn = AutoregressiveNN(input_dim, hidden_dims, param_dims=[1, 1])
_, init_params = arn_init(rng_key, (input_dim,))
return InverseAutoregressiveTransform(partial(arn, init_params))
@pytest.mark.parametrize(
"ts",
[
[transforms.PowerTransform(0.7), transforms.AffineTransform(2.0, 3.0)],
[transforms.ExpTransform()],
[
transforms.ComposeTransform(
[transforms.AffineTransform(-2, 3), transforms.ExpTransform()]
),
transforms.PowerTransform(3.0),
],
[
_make_iaf(5, hidden_dims=[10], rng_key=random.PRNGKey(0)),
transforms.PermuteTransform(jnp.arange(5)[::-1]),
_make_iaf(5, hidden_dims=[10], rng_key=random.PRNGKey(1)),
],
],
)
def test_compose_transform_with_intermediates(ts):
transform = transforms.ComposeTransform(ts)
x = random.normal(random.PRNGKey(2), (7, 5))
y, intermediates = transform.call_with_intermediates(x)
logdet = transform.log_abs_det_jacobian(x, y, intermediates)
assert_allclose(y, transform(x))
assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))
@pytest.mark.parametrize("x_dim, y_dim", [(3, 3), (3, 4)])
def test_unpack_transform(x_dim, y_dim):
xy = np.random.randn(x_dim + y_dim)
unpack_fn = lambda xy: {"x": xy[:x_dim], "y": xy[x_dim:]} # noqa: E731
transform = transforms.UnpackTransform(unpack_fn)
z = transform(xy)
if x_dim == y_dim:
with pytest.warns(UserWarning, match="UnpackTransform.inv"):
t = transform.inv(z)
else:
t = transform.inv(z)
assert_allclose(t, xy)
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS)
def test_generated_sample_distribution(
jax_dist, sp_dist, params, N_sample=100_000, key=random.PRNGKey(11)
):
"""On samplers that we do not get directly from JAX, (e.g. we only get
Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test
agreement in the empirical distribution of generated samples between our
samplers and those from SciPy.
"""
if jax_dist not in [dist.Gumbel]:
pytest.skip(
"{} sampling method taken from upstream, no need to"
"test generated samples.".format(jax_dist.__name__)
)
jax_dist = jax_dist(*params)
if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:
our_samples = jax_dist.sample(key, (N_sample,))
ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)
assert ks_result.pvalue > 0.05
@pytest.mark.parametrize(
"jax_dist, params, support",
[
(dist.BernoulliLogits, (5.0,), jnp.arange(2)),
(dist.BernoulliProbs, (0.5,), jnp.arange(2)),
(dist.BinomialLogits, (4.5, 10), jnp.arange(11)),
(dist.BinomialProbs, (0.5, 11), jnp.arange(12)),
(dist.BetaBinomial, (2.0, 0.5, 12), jnp.arange(13)),
(dist.CategoricalLogits, (np.array([3.0, 4.0, 5.0]),), jnp.arange(3)),
(dist.CategoricalProbs, (np.array([0.1, 0.5, 0.4]),), jnp.arange(3)),
],
)
@pytest.mark.parametrize("batch_shape", [(5,), ()])
@pytest.mark.parametrize("expand", [False, True])
def test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand):
p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))
actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)
expected = support.reshape((-1,) + (1,) * len(batch_shape))
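    # with expand=True, the enumerated support is broadcast over the batch shape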
if expand:
expected = jnp.broadcast_to(expected, support.shape + batch_shape)
assert_allclose(actual, expected)
def test_zero_inflated_enumerate_support():
base_dist = dist.Bernoulli(0.5)
d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)
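    # zero-inflation adds mass at zero but does not change the {0, 1} support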
assert d.has_enumerate_support
assert_allclose(d.enumerate_support(), base_dist.enumerate_support())
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS + DISCRETE)
@pytest.mark.parametrize("prepend_shape", [(), (2, 3)])
@pytest.mark.parametrize("sample_shape", [(), (4,)])
def test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):
jax_dist = jax_dist(*params)
new_batch_shape = prepend_shape + jax_dist.batch_shape
expanded_dist = jax_dist.expand(new_batch_shape)
rng_key = random.PRNGKey(0)
samples = expanded_dist.sample(rng_key, sample_shape)
assert expanded_dist.batch_shape == new_batch_shape
assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape
assert expanded_dist.log_prob(samples).shape == sample_shape + new_batch_shape
# test expand of expand
assert (
expanded_dist.expand((3,) + new_batch_shape).batch_shape
== (3,) + new_batch_shape
)
# test expand error
if prepend_shape:
with pytest.raises(ValueError, match="Cannot broadcast distribution of shape"):
assert expanded_dist.expand((3,) + jax_dist.batch_shape)
@pytest.mark.parametrize("base_shape", [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1, 5)])
@pytest.mark.parametrize("event_dim", [0, 1, 2, 3])
@pytest.mark.parametrize("sample_shape", [(1000,), (1000, 7, 1), (1000, 1, 7)])
def test_expand_shuffle_regression(base_shape, event_dim, sample_shape):
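    # regression test: samples of an expanded distribution must stay aligned
    # with the broadcasted base loc (no shuffling across batch dimensions)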
expand_shape = (2, 3, 5)
event_dim = min(event_dim, len(base_shape))
loc = random.normal(random.PRNGKey(0), base_shape) * 10
base_dist = dist.Normal(loc, 0.1).to_event(event_dim)
expanded_dist = base_dist.expand(expand_shape[: len(expand_shape) - event_dim])
samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)
expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.shape())
assert_allclose(samples.mean(0), expected_mean, atol=0.1)
@pytest.mark.parametrize("batch_shape", [(), (4,), (10, 3)])
def test_sine_bivariate_von_mises_batch_shape(batch_shape):
phi_loc = jnp.broadcast_to(jnp.array(0.0), batch_shape)
psi_loc = jnp.array(0.0)
phi_conc = jnp.array(1.0)
psi_conc = jnp.array(1.0)
corr = jnp.array(0.1)
sine = SineBivariateVonMises(phi_loc, psi_loc, phi_conc, psi_conc, corr)
assert sine.batch_shape == batch_shape
samples = sine.sample(random.PRNGKey(0))
assert samples.shape == (*batch_shape, 2)
def test_sine_bivariate_von_mises_sample_mean():
loc = jnp.array([[2.0, -1.0], [-2, 1.0]])
sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)
samples = sine.sample(random.PRNGKey(0), (5000,))
assert_allclose(_circ_mean(samples).T, loc, rtol=5e-3)
@pytest.mark.parametrize("batch_shape", [(), (4,)])
def test_polya_gamma(batch_shape, num_points=20000):
d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)
rng_key = random.PRNGKey(0)
# test density approximately normalized
x = jnp.linspace(1.0e-6, d.truncation_point, num_points)
prob = (d.truncation_point / num_points) * jnp.exp(
logsumexp(d.log_prob(x), axis=-1)
)
assert_allclose(prob, jnp.ones(batch_shape), rtol=1.0e-4)
# test mean of approximate sampler
z = d.sample(rng_key, sample_shape=(3000,))
mean = jnp.mean(z, axis=-1)
assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)
@pytest.mark.parametrize(
"extra_event_dims,expand_shape",
[(0, (4, 3, 2, 1)), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))],
)
def test_expand_reshaped_distribution(extra_event_dims, expand_shape):
loc = jnp.zeros((1, 6))
scale_tril = jnp.eye(6)
d = dist.MultivariateNormal(loc, scale_tril=scale_tril)
full_shape = (4, 1, 1, 1, 6)
reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)
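    # to_event absorbs the trailing `extra_event_dims` batch dims into the event shape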
cut = 4 - extra_event_dims
batch_shape, event_shape = full_shape[:cut], full_shape[cut:]
assert reshaped_dist.batch_shape == batch_shape
assert reshaped_dist.event_shape == event_shape
large = reshaped_dist.expand(expand_shape)
assert large.batch_shape == expand_shape
assert large.event_shape == event_shape
# Throws error when batch shape cannot be broadcasted
with pytest.raises((RuntimeError, ValueError)):
reshaped_dist.expand(expand_shape + (3,))
# Throws error when trying to shrink existing batch shape
with pytest.raises((RuntimeError, ValueError)):
large.expand(expand_shape[1:])
@pytest.mark.parametrize(
"batch_shape, mask_shape",
[((), ()), ((2,), ()), ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))],
)
@pytest.mark.parametrize("event_shape", [(), (3,)])
def test_mask(batch_shape, event_shape, mask_shape):
jax_dist = (
dist.Normal().expand(batch_shape + event_shape).to_event(len(event_shape))
)
mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)
if mask_shape == ():
mask = bool(mask)
samples = jax_dist.sample(random.PRNGKey(1))
actual = jax_dist.mask(mask).log_prob(samples)
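    # log_prob is zeroed out wherever the mask is False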
assert_allclose(
actual != 0,
jnp.broadcast_to(mask, lax.broadcast_shapes(batch_shape, mask_shape)),
)
@pytest.mark.parametrize("event_shape", [(), (4,), (2, 4)])
def test_mask_grad(event_shape):
def f(x, data):
base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()
mask = jnp.all(
jnp.isfinite(data), tuple(-i - 1 for i in range(len(event_shape)))
)
log_prob = base_dist.mask(mask).log_prob(data)
assert log_prob.shape == data.shape[: len(data.shape) - len(event_shape)]
return log_prob.sum()
data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])
log_prob, grad = jax.value_and_grad(f)(1.0, data)
assert jnp.isfinite(grad) and jnp.isfinite(log_prob)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_dist_pytree(jax_dist, sp_dist, params):
def f(x):
return jax_dist(*params)
if jax_dist is _ImproperWrapper:
pytest.skip("Cannot flattening ImproperUniform")
if jax_dist is dist.EulerMaruyama:
pytest.skip("EulerMaruyama doesn't define flatten/unflatten")
jax.jit(f)(0) # this test for flatten/unflatten
lax.map(f, np.ones(3)) # this test for compatibility w.r.t. scan
# Test that parameters do not change after flattening.
expected_dist = f(0)
actual_dist = jax.jit(f)(0)
expected_sample = expected_dist.sample(random.PRNGKey(0))
actual_sample = actual_dist.sample(random.PRNGKey(0))
expected_log_prob = expected_dist.log_prob(expected_sample)
actual_log_prob = actual_dist.log_prob(actual_sample)
assert_allclose(actual_sample, expected_sample, rtol=1e-6)
assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-6)
@pytest.mark.parametrize(
"method, arg", [("to_event", 1), ("mask", False), ("expand", [5])]
)
def test_special_dist_pytree(method, arg):
def f(x):
d = dist.Normal(np.zeros(1), np.ones(1))
return getattr(d, method)(arg)
jax.jit(f)(0)
lax.map(f, np.ones(3))
def test_expand_no_unnecessary_batch_shape_expansion():
# ExpandedDistribution can mutate the `batch_shape` of
# its base distribution in order to make ExpandedDistribution
# mappable, see #684. However, this mutation should not take
# place if no mapping operation is performed.
for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):
# Low level test: ensure that (tree_flatten o tree_unflatten)(expanded_dist)
# amounts to an identity operation.
d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])
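        # tree_unflatten expects (aux_data, children), the reverse of
        # tree_flatten's (children, aux_data) output, hence the [::-1]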
roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])
assert d.batch_shape == roundtripped_d.batch_shape
assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape
assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)
# High-level test: `jax.jit`ting a function returning an ExpandedDistribution
# (which involves an instance of the low-level case as it will transform
# the original function by adding some flattening and unflattening steps)
# should return same object as its non-jitted equivalent.
def bs(arg):
return dist.Normal(arg, arg).expand([10, 3, *arg.shape])
d = bs(arg)
dj = jax.jit(bs)(arg)
assert isinstance(d, dist.ExpandedDistribution)
assert isinstance(dj, dist.ExpandedDistribution)
assert d.batch_shape == dj.batch_shape
assert d.base_dist.batch_shape == dj.base_dist.batch_shape
assert d.base_dist.event_shape == dj.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
def test_kl_delta_normal_shape(batch_shape):
v = np.random.normal(size=batch_shape)
loc = np.random.normal(size=batch_shape)
scale = np.exp(np.random.normal(size=batch_shape))
p = dist.Delta(v)
q = dist.Normal(loc, scale)
assert kl_divergence(p, q).shape == batch_shape
def test_kl_delta_normal():
v = np.random.normal()
loc = np.random.normal()
scale = np.exp(np.random.normal())
p = dist.Delta(v, 10.0)
q = dist.Normal(loc, scale)
assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize("event_shape", [(), (4,), (2, 3)], ids=str)
def test_kl_independent_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))
q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))
actual = kl_divergence(
dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))
)
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize("event_shape", [(), (4,), (2, 3)], ids=str)
def test_kl_expanded_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)
q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)
actual = kl_divergence(
dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))
)
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
@pytest.mark.parametrize("shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize(
"p_dist, q_dist",
[
(dist.Beta, dist.Beta),
(dist.Gamma, dist.Gamma),
(dist.Kumaraswamy, dist.Beta),
(dist.Normal, dist.Normal),
(dist.Weibull, dist.Gamma),
],
)
def test_kl_univariate(shape, p_dist, q_dist):
def make_dist(dist_class):
params = {}
for k, c in dist_class.arg_constraints.items():
if c is constraints.real:
params[k] = np.random.normal(size=shape)
elif c is constraints.positive:
params[k] = np.exp(np.random.normal(size=shape))
else:
raise ValueError(f"Missing pattern for param {k}.")
d = dist_class(**params)
if dist_class is dist.Kumaraswamy:
d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000
return d
p = make_dist(p_dist)
q = make_dist(q_dist)
actual = kl_divergence(p, q)
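    # compare against a Monte Carlo estimate of KL(p || q) using samples from p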
x = p.sample(random.PRNGKey(0), (10000,)).copy()
expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize("shape", [(4,), (2, 3)], ids=str)
def test_kl_dirichlet_dirichlet(shape):
p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
actual = kl_divergence(p, q)
x = p.sample(random.PRNGKey(0), (10_000,)).copy()
expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)
assert_allclose(actual, expected, rtol=0.05)
def test_vmapped_binomial_p0():
# test that vmapped binomial with p = 0 does not have an infinite loop
def sample_binomial_withp0(key):
n = 2 * (random.uniform(key) > 0.5)
_, key = random.split(key)
return dist.Binomial(total_count=n, probs=0).sample(key)
jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))
def _get_vmappable_dist_init_params(jax_dist):
    if jax_dist.__name__ in ("_TruncatedCauchy", "_TruncatedNormal"):
        return [2, 3]
elif issubclass(jax_dist, dist.Distribution):
init_parameters = list(inspect.signature(jax_dist.__init__).parameters.keys())[
1:
]
vmap_over_parameters = list(
inspect.signature(vmap_over.dispatch(jax_dist)).parameters.keys()
)[1:]
        return [
            i
            for i, name in enumerate(init_parameters)
            if name in vmap_over_parameters
        ]
else:
raise ValueError
def _allclose_or_equal(a1, a2):
if isinstance(a1, np.ndarray):
return np.allclose(a2, a1)
elif isinstance(a1, jnp.ndarray):
return jnp.allclose(a2, a1)
elif isinstance(a1, csr_matrix):
return np.allclose(a2.todense(), a1.todense())
else:
return a2 == a1 or a2 is a1
def _tree_equal(t1, t2):
t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)
return jnp.all(jax.flatten_util.ravel_pytree(t)[0])
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_vmap_dist(jax_dist, sp_dist, params):
param_names = list(inspect.signature(jax_dist).parameters.keys())
vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)
vmappable_param_idxs = vmappable_param_idxs[: len(params)]
if len(vmappable_param_idxs) == 0:
return
def make_jax_dist(*params):
return jax_dist(*params)
def sample(d: dist.Distribution):
return d.sample(random.PRNGKey(0))
d = make_jax_dist(*params)
if isinstance(d, _SparseCAR) and d.is_sparse:
# In this case, since csr arrays are not jittable,
# _SparseCAR has a csr_matrix as part of its pytree
# definition (not as a pytree leaf). This causes pytree
# operations like tree_map to fail, since these functions
# compare the pytree def of each of the arguments using ==
# which is ambiguous for array-like objects.
return
in_out_axes_cases = [
# vmap over all args
(
tuple(0 if i in vmappable_param_idxs else None for i in range(len(params))),
0,
),
# vmap over a single arg, out over all attributes of a distribution
*(
([0 if i == idx else None for i in range(len(params))], 0)
for idx in vmappable_param_idxs
if params[idx] is not None
),
# vmap over a single arg, out over the associated attribute of the distribution
*(
(
[0 if i == idx else None for i in range(len(params))],
vmap_over(d, **{param_names[idx]: 0}),
)
for idx in vmappable_param_idxs
if params[idx] is not None
),
# vmap over a single arg, axis=1, (out single attribute, axis=1)
*(
(
[1 if i == idx else None for i in range(len(params))],
vmap_over(d, **{param_names[idx]: 1}),
)
for idx in vmappable_param_idxs
if isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).ndim > 0
            # skip this distribution because _GeneralMixture.__init__ turns
            # 1d inputs into 0d attributes, which breaks the in_axes=1 case
            # (this case is only exercised for arrays of rank >= 1)
and jax_dist is not _GeneralMixture
),
]
for in_axes, out_axes in in_out_axes_cases:
batched_params = [
jax.tree_map(lambda x: jnp.expand_dims(x, ax), arg)
if isinstance(ax, int)
else arg
for arg, ax in zip(params, in_axes)
]
# Recreate the jax_dist to avoid side effects coming from `d.sample`
# triggering lazy_property computations, which, in a few cases, break
# vmap_over's expectations regarding existing attributes to be vmapped.
d = make_jax_dist(*params)
batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes)(
*batched_params
)
eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(
batched_d, d
)
assert eq == jnp.array([True])
samples_dist = sample(d)
samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)
assert samples_batched_dist.shape == (1, *samples_dist.shape)
def test_multinomial_abstract_total_count():
probs = jnp.array([0.2, 0.5, 0.3])
key = random.PRNGKey(0)
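    # under jit, total_count is abstract, so total_count_max must bound sampling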
def f(x):
total_count = x.sum(-1)
return dist.Multinomial(total_count, probs=probs, total_count_max=10).sample(
key
)
x = dist.Multinomial(10, probs).sample(key)
y = jax.jit(f)(x)
assert_allclose(x, y, rtol=1e-6)
def test_normal_log_cdf():
# test if log_cdf method agrees with jax.scipy.stats.norm.logcdf
# and if exp(log_cdf) agrees with cdf
loc = jnp.array([[0.0, -10.0, 20.0]])
scale = jnp.array([[1, 5, 7]])
values = jnp.linspace(-5, 5, 100).reshape(-1, 1)
numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)
numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)
jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)
assert_allclose(numpyro_log_cdf, jax_log_cdf)
assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-6)
@pytest.mark.parametrize(
"value",
[
-15.0,
jnp.array([[-15.0], [-10.0], [-5.0]]),
jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]]),
],
)
def test_truncated_normal_log_prob_in_tail(value):
# define set of distributions truncated in tail of distribution
loc = 1.35
scale = jnp.geomspace(0.01, 1, 10)
low, high = (-20, -1.0)
a, b = (low - loc) / scale, (high - loc) / scale # rescale for jax input
numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high).log_prob(
value
)
jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)
assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)
def test_sample_truncated_normal_in_tail():
    # test that samples from a distribution truncated in
    # its tail do not contain any infs
tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)
samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10_000,))
assert ~jnp.isinf(samples).any()
@jax.enable_custom_prng()
def test_jax_custom_prng():
samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))
assert ~jnp.isinf(samples).any()
|
normal
|
{
"blob_id": "c5e7fdcbd4a9281597a35a180f2853caac68f811",
"index": 7562,
"step-1": "<mask token>\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\n<mask token>\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\n<mask token>\n\n\nclass T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):\n\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\n<mask token>\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < '1.6.0':\n pytest.skip(\n 'Multivariate Student-T distribution is not available in scipy < 1.6'\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\n<mask token>\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\n<mask token>\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,\n math.pi])\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0]), np.array([1.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n<mask token>\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(self:\n SineSkewedVonMisesBatched, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc,\n scale=scale)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def 
__init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix\n =covariance_matrix)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, scales):\n component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,\n scale_ in zip(locs, scales)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in\n self.component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [dist.MultivariateNormal(loc=loc_,\n covariance_matrix=covariance_matrix) for loc_,\n covariance_matrix in zip(locs, covariance_matrices)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [vmap_over(d, loc=locs) for d in self.\n component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {'rate': constraints.positive, 'gate_logits':\n constraints.real}\n pytree_data_fields = 'rate',\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), gate_logits, validate_args=\n validate_args)\n\n\n<mask token>\n\n\nclass SparsePoisson(dist.Poisson):\n\n def __init__(self, rate, *, validate_args=None):\n 
super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=\n vmap_over(self.base_dist, loc=loc, scale=scale))\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = ['loc', 'correlation', 'conditional_precision']\n\n def __init__(self, loc, correlation, conditional_precision, adj_matrix,\n *, is_sparse=True, validate_args=None):\n super().__init__(loc, correlation, conditional_precision,\n adj_matrix, is_sparse=True, validate_args=validate_args)\n\n\n<mask token>\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\n<mask token>\n\n\ndef _is_batched_multivariate(jax_dist):\n return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.\n event_shape)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = 1 if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\n \"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.\n MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.\n covariance_matrix), rtol=1e-06)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, 'shape', ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = jax_dist(*params)\n try:\n expected_batch_shape, expected_event_shape = type(jax_dist\n ).infer_shapes(*shapes)\n except NotImplementedError:\n pytest.skip(\n f'{type(jax_dist).__name__}.infer_shapes() is not implemented')\n assert jax_dist.batch_shape == expected_batch_shape\n assert jax_dist.event_shape == expected_event_shape\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_has_rsample(jax_dist, sp_dist, params):\n jax_dist = jax_dist(*params)\n 
masked_dist = jax_dist.mask(False)\n indept_dist = jax_dist.expand_by([2]).to_event(1)\n transf_dist = dist.TransformedDistribution(jax_dist, biject_to(\n constraints.real))\n assert masked_dist.has_rsample == jax_dist.has_rsample\n assert indept_dist.has_rsample == jax_dist.has_rsample\n assert transf_dist.has_rsample == jax_dist.has_rsample\n if jax_dist.has_rsample:\n assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n if isinstance(jax_dist, dist.TransformedDistribution):\n assert jax_dist.base_dist.has_rsample\n else:\n assert set(jax_dist.arg_constraints) == set(jax_dist.\n reparametrized_params)\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.Normal):\n masked_dist.rsample(random.PRNGKey(0))\n indept_dist.rsample(random.PRNGKey(0))\n transf_dist.rsample(random.PRNGKey(0))\n else:\n with pytest.raises(NotImplementedError):\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.BernoulliProbs):\n with pytest.raises(NotImplementedError):\n masked_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n indept_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n transf_dist.rsample(random.PRNGKey(0))\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [\n 'concentration1', 'concentration0'], 'BetaProportion': ['mean',\n 'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],\n 'InverseGamma': ['concentration'], 'LKJ': ['concentration'],\n 'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.\n __name__, [])\n dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1\n :] if inspect.isclass(jax_dist) else inspect.getfullargspec(\n jax_dist)[0])]\n params_dict = dict(zip(dist_args[:len(params)], params))\n jax_class = type(jax_dist(**params_dict))\n reparametrized_params = [p for p in jax_class.reparametrized_params if \n p not in gamma_derived_params]\n if not reparametrized_params:\n pytest.skip('{} not reparametrized.'.format(jax_class.__name__))\n nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in\n reparametrized_params}\n repara_params = tuple(v for k, v in params_dict.items() if k in\n reparametrized_params)\n rng_key = random.PRNGKey(0)\n\n def fn(args):\n args_dict = dict(zip(reparametrized_params, args))\n return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).\n sample(key=rng_key))\n actual_grad = jax.grad(fn)(repara_params)\n assert len(actual_grad) == len(repara_params)\n eps = 0.001\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(\n repara_params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(\n repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,\n atol=0.03)\n\n\[email protected]('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.\n Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,\n (0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,\n (5.0, 2.0, 4.0))])\ndef test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = jax_dist(*params).sample(key=rng_key, 
sample_shape=(N,))\n return (z + z ** 2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean ** 2\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\[email protected]('jit', [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples\n ).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (dist.LeftTruncatedDistribution, dist.\n RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], dist.Distribution):\n loc, scale, low, high = params[0].loc, params[0].scale, params[\n 1], params[2]\n else:\n loc, scale, low, high = params\n if low is None:\n low = -np.inf\n if high is None:\n high = np.inf\n sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -\n sp_dist.cdf(low))\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,\n atol=1e-05)\n return\n pytest.skip('no corresponding scipy distn.')\n if _is_batched_multivariate(jax_dist):\n pytest.skip('batching not allowed in multivariate distns.')\n if jax_dist.event_shape and prepend_shape:\n pytest.skip(\n 'batched samples cannot be scored by multivariate distributions.')\n sp_dist = sp_dist(*params)\n try:\n expected = sp_dist.logpdf(samples)\n except AttributeError:\n expected = sp_dist.logpmf(samples)\n except ValueError as e:\n if \"The input vector 'x' must lie within the normal simplex.\" in str(e\n ):\n samples = jax.device_get(samples).astype('float64')\n samples = samples / samples.sum(axis=-1, keepdims=True)\n expected = sp_dist.logpdf(samples)\n else:\n raise e\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)\n\n\ndef test_mixture_log_prob():\n gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist\n .Normal(0, 1).expand([2]))\n actual = gmm.log_prob(0.0)\n expected = dist.Normal(0, 1).log_prob(0.0)\n assert_allclose(actual, expected)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.\n Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])\[email protected]('ignore:overflow encountered:RuntimeWarning')\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n if d.event_dim > 0:\n pytest.skip(\n 'skip testing cdf/icdf methods of multivariate distributions')\n samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n try:\n rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05\n if d.shape() == () and not d.is_discrete:\n assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.\n log_prob(samples)), atol=1e-05, rtol=rtol)\n assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(\n -d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)\n assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,\n rtol=1e-05)\n assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)\n except NotImplementedError:\n pass\n if not sp_dist:\n 
pytest.skip('no corresponding scipy distn.')\n sp_dist = sp_dist(*params)\n try:\n actual_cdf = d.cdf(samples)\n expected_cdf = sp_dist.cdf(samples)\n assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)\n actual_icdf = d.icdf(quantiles)\n expected_icdf = sp_dist.ppf(quantiles)\n assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)\n except NotImplementedError:\n pass\n\n\n<mask token>\n\n\[email protected]('dimension', [2, 3, 5])\[email protected]('concentration', [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(\n partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = biject_to(constraints.corr_cholesky\n ).log_abs_det_jacobian(unconstrained, sample)\n signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = (beta_log_prob - affine_logdet -\n signed_stick_breaking_logdet)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06\n )\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(0, 20, (1000, 100))\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n<mask token>\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n concentration0 = np.exp(np.random.normal(size=shape))\n concentration1 = np.exp(np.random.normal(size=shape))\n value = jnp.arange(1 + total_count)\n num_samples = 100000\n probs = np.random.beta(concentration1, concentration0, size=(\n num_samples,) + shape)\n log_probs = dist.Binomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.BetaBinomial(concentration1, concentration0, total_count\n ).log_prob(value)\n assert_allclose(actual, expected, rtol=0.02)\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('batch_shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n event_shape = 3,\n concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1\n ,) * len(batch_shape) + event_shape)\n num_samples = 100000\n probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (\n num_samples, 1))\n log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.DirichletMultinomial(concentration, total_count).log_prob(\n value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('shape', [(1,), (3, 1), 
(2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n gamma_conc = np.exp(np.random.normal(size=shape))\n gamma_rate = np.exp(np.random.normal(size=shape))\n value = jnp.arange(15)\n num_samples = 300000\n poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(\n num_samples,) + shape)\n log_probs = dist.Poisson(poisson_rate).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n pytest.skip('we have separated tests for LKJCholesky distribution')\n if jax_dist is _ImproperWrapper:\n pytest.skip(\n 'no param for ImproperUniform to test for log_prob gradient')\n rng_key = random.PRNGKey(0)\n value = jax_dist(*params).sample(rng_key)\n\n def fn(*args):\n return jnp.sum(jax_dist(*args).log_prob(value))\n eps = 0.001\n for i in range(len(params)):\n if jax_dist is dist.EulerMaruyama and i == 1:\n continue\n if jax_dist is _SparseCAR and i == 3:\n continue\n if isinstance(params[i], dist.Distribution):\n continue\n if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,\n jnp.int64):\n continue\n actual_grad = jax.grad(fn, i)(*params)\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]\n fn_lhs = fn(*args_lhs)\n fn_rhs = fn(*args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad) == jnp.shape(params[i])\n if i == 0 and jax_dist is dist.Delta:\n expected_grad = 0.0\n assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,\n atol=0.01)\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,\n _Gaussian2DMixture, _GeneralMixture, _General2DMixture):\n pytest.skip(f'{jax_dist.__name__} is a function, not a class')\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky\n ) and dist_args[i] != 'concentration':\n continue\n if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':\n continue\n if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i\n ] == 'base_dist':\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':\n continue\n if jax_dist is dist.SineBivariateVonMises and dist_args[i\n ] == 'weighted_correlation':\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n if jax_dist is dist.MultivariateStudentT:\n 
valid_params[0] += 1\n if jax_dist is dist.LogUniform:\n valid_params[1] += valid_params[0]\n assert jax_dist(*oob_params)\n if not dependent_constraint and (jax_dist is not _ImproperWrapper and \n 'SineSkewed' not in jax_dist.__name__):\n with pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n with pytest.raises(ValueError):\n oob_params = jax.device_get(oob_params)\n\n def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n jax.jit(dist_gen_fn)()\n d = jax_dist(*valid_params, validate_args=True)\n if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and\n prepend_shape):\n valid_samples = gen_values_within_bounds(d.support, size=\n prepend_shape + d.batch_shape + d.event_shape)\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,\n rtol=1e-05)\n oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +\n d.batch_shape + d.event_shape)\n with pytest.warns(UserWarning, match='Out-of-support'):\n d.log_prob(oob_samples)\n with pytest.warns(UserWarning, match='Out-of-support'):\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n with pytest.raises(ValueError, match='got invalid'):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n with pytest.warns(UserWarning, match='Out-of-support'):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(\n data).sum()\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data\n ).sum()\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-06)\n assert_allclose(grad_fx, grad_gx, atol=0.0001)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(ValueError,\n match='^BetaProportion distribution got invalid mean parameter\\\\.$'):\n dist.BetaProportion(1.0, 1.0)\n\n\n<mask token>\n\n\[email protected]('constraint', [constraints.corr_cholesky,\n constraints.corr_matrix, constraints.greater_than(2), constraints.\n interval(-3, 5), constraints.l1_ball, constraints.less_than(1),\n constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector, constraints.positive, constraints.\n positive_definite, constraints.positive_ordered_vector, constraints.\n real, constraints.real_vector, constraints.simplex, constraints.\n softplus_positive, constraints.softplus_lower_cholesky, constraints.\n unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.\n __class__)\[email protected]('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,\n 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == 
constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=\n jnp.bool_))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-05, rtol=1e-05)\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)\n [:, :-1])[1]\n elif constraint in [constraints.real_vector, constraints.\n ordered_vector, constraints.positive_ordered_vector,\n constraints.l1_ball]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x),\n diagonal=-1)\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1\n ) + jnp.identity(matrix.shape[-1])\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n elif constraint in [constraints.lower_cholesky, constraints.\n scaled_unit_lower_cholesky, constraints.positive_definite,\n constraints.softplus_lower_cholesky]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x))\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if constraint is constraints.positive_definite:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(\n jnp.diag(matrix))\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)\n assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)\n\n\[email protected]('transform, event_shape', [(PermuteTransform(np.\n array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (\n SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np\n .array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(\n [biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv]), (5,))])\[email protected]('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1, \n 3), (5, 3)])\ndef test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = 
biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-06, rtol=0.0001)\n assert transform.inv.inv is transform\n assert transform.inv is transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-06)\n assert_allclose(actual, -inv_expected, atol=1e-06)\n\n\n<mask token>\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z\n ) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n assert_allclose(log_det, expected_log_det)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\n<mask token>\n\n\[email protected]('transformed_dist', [dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),\n dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms\n .PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(random\n .PRNGKey(1))\n assert_allclose(transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample))\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=\n 100000, key=random.PRNGKey(11)):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. 
we only get\n Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n agreement in the empirical distribution of generated samples between our\n samplers and those from SciPy.\n \"\"\"\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n '{} sampling method taken from upstream, no need totest generated samples.'\n .format(jax_dist.__name__))\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)\[email protected]('prepend_shape', [(), (2, 3)])\[email protected]('sample_shape', [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n jax_dist = jax_dist(*params)\n new_batch_shape = prepend_shape + jax_dist.batch_shape\n expanded_dist = jax_dist.expand(new_batch_shape)\n rng_key = random.PRNGKey(0)\n samples = expanded_dist.sample(rng_key, sample_shape)\n assert expanded_dist.batch_shape == new_batch_shape\n assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n assert expanded_dist.log_prob(samples\n ).shape == sample_shape + new_batch_shape\n assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,\n ) + new_batch_shape\n if prepend_shape:\n with pytest.raises(ValueError, match=\n 'Cannot broadcast distribution of shape'):\n assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\n<mask token>\n\n\[email protected]('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)\n ), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n loc = jnp.zeros((1, 6))\n scale_tril = jnp.eye(6)\n d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n full_shape = 4, 1, 1, 1, 6\n reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n cut = 4 - extra_event_dims\n batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n assert reshaped_dist.batch_shape == batch_shape\n assert reshaped_dist.event_shape == event_shape\n large = reshaped_dist.expand(expand_shape)\n assert large.batch_shape == expand_shape\n assert large.event_shape == event_shape\n with pytest.raises((RuntimeError, ValueError)):\n reshaped_dist.expand(expand_shape + (3,))\n with pytest.raises((RuntimeError, ValueError)):\n large.expand(expand_shape[1:])\n\n\n<mask token>\n\n\[email protected]('event_shape', [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n\n def f(x, data):\n base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(\n event_shape))))\n log_prob = base_dist.mask(mask).log_prob(data)\n assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)\n ]\n return log_prob.sum()\n data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n log_prob, grad = jax.value_and_grad(f)(1.0, data)\n assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\n<mask token>\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n assert d.base_dist.event_shape == 
 roundtripped_d.base_dist.event_shape\n        assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n        assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n        def bs(arg):\n            return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n        d = bs(arg)\n        dj = jax.jit(bs)(arg)\n        assert isinstance(d, dist.ExpandedDistribution)\n        assert isinstance(dj, dist.ExpandedDistribution)\n        assert d.batch_shape == dj.batch_shape\n        assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n        assert d.base_dist.event_shape == dj.base_dist.event_shape\n        assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n        assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n    v = np.random.normal(size=batch_shape)\n    loc = np.random.normal(size=batch_shape)\n    scale = np.exp(np.random.normal(size=batch_shape))\n    p = dist.Delta(v)\n    q = dist.Normal(loc, scale)\n    assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n    v = np.random.normal()\n    loc = np.random.normal()\n    scale = np.exp(np.random.normal())\n    p = dist.Delta(v, 10.0)\n    q = dist.Normal(loc, scale)\n    assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n    shape = batch_shape + event_shape\n    p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n        size=shape)))\n    q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n        size=shape)))\n    actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n        Independent(q, len(event_shape)))\n    expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n    assert_allclose(actual, expected)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.\n    Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.\n    Normal), (dist.Weibull, dist.Gamma)])\ndef test_kl_univariate(shape, p_dist, q_dist):\n\n    def make_dist(dist_class):\n        params = {}\n        for k, c in dist_class.arg_constraints.items():\n            if c is constraints.real:\n                params[k] = np.random.normal(size=shape)\n            elif c is constraints.positive:\n                params[k] = np.exp(np.random.normal(size=shape))\n            else:\n                raise ValueError(f'Missing pattern for param {k}.')\n        d = dist_class(**params)\n        if dist_class is dist.Kumaraswamy:\n            d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n        return d\n    p = make_dist(p_dist)\n    q = make_dist(q_dist)\n    actual = kl_divergence(p, q)\n    x = p.sample(random.PRNGKey(0), (10000,)).copy()\n    expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n    assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('shape', [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n    p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n    q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n    actual = kl_divergence(p, q)\n    x = p.sample(random.PRNGKey(0), (10000,)).copy()\n    expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n    assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n\n    def sample_binomial_withp0(key):\n        n = 2 * (random.uniform(key) > 0.5)\n        _, key = random.split(key)\n        return dist.Binomial(total_count=n, probs=0).sample(key)\n    jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\n<mask token>\n\n\ndef
 _tree_equal(t1, t2):\n    t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n    return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n    param_names = list(inspect.signature(jax_dist).parameters.keys())\n    vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n    vmappable_param_idxs = vmappable_param_idxs[:len(params)]\n    if len(vmappable_param_idxs) == 0:\n        return\n\n    def make_jax_dist(*params):\n        return jax_dist(*params)\n\n    def sample(d: dist.Distribution):\n        return d.sample(random.PRNGKey(0))\n    d = make_jax_dist(*params)\n    if isinstance(d, _SparseCAR) and d.is_sparse:\n        return\n    in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for\n        i in range(len(params))), 0), *(([(0 if i == idx else None) for i in\n        range(len(params))], 0) for idx in vmappable_param_idxs if params[\n        idx] is not None), *(([(0 if i == idx else None) for i in range(len\n        (params))], vmap_over(d, **{param_names[idx]: 0})) for idx in\n        vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==\n        idx else None) for i in range(len(params))], vmap_over(d, **{\n        param_names[idx]: 1})) for idx in vmappable_param_idxs if \n        isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).\n        ndim > 0 and jax_dist is not _GeneralMixture)]\n    for in_axes, out_axes in in_out_axes_cases:\n        batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),\n            arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,\n            in_axes)]\n        d = make_jax_dist(*params)\n        batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes\n            )(*batched_params)\n        eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n            batched_d, d)\n        assert eq == jnp.array([True])\n        samples_dist = sample(d)\n        samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n        assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n    probs = jnp.array([0.2, 0.5, 0.3])\n    key = random.PRNGKey(0)\n\n    def f(x):\n        total_count = x.sum(-1)\n        return dist.Multinomial(total_count, probs=probs, total_count_max=10\n            ).sample(key)\n    x = dist.Multinomial(10, probs).sample(key)\n    y = jax.jit(f)(x)\n    assert_allclose(x, y, rtol=1e-06)\n\n\ndef test_normal_log_cdf():\n    loc = jnp.array([[0.0, -10.0, 20.0]])\n    scale = jnp.array([[1, 5, 7]])\n    values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n    numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n    numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n    jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n    assert_allclose(numpyro_log_cdf, jax_log_cdf)\n    assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)\n\n\n<mask token>\n\n\ndef test_sample_truncated_normal_in_tail():\n    tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n    samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))\n    assert ~jnp.isinf(samples).any()\n\n\n@jax.enable_custom_prng()\ndef test_jax_custom_prng():\n    samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n    assert ~jnp.isinf(samples).any()\n",
"step-2": "<mask token>\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\n<mask token>\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\n<mask token>\n\n\nclass T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):\n\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\n<mask token>\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < '1.6.0':\n pytest.skip(\n 'Multivariate Student-T distribution is not available in scipy < 1.6'\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\n<mask token>\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\ndef _TruncatedCauchy(loc, scale, low, high):\n return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)\n\n\n<mask token>\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,\n math.pi])\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0]), np.array([1.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n<mask token>\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(self:\n SineSkewedVonMisesBatched, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc,\n scale=scale)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n 
_component_distribution=component_distribution)\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix\n =covariance_matrix)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, scales):\n component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,\n scale_ in zip(locs, scales)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in\n self.component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [dist.MultivariateNormal(loc=loc_,\n covariance_matrix=covariance_matrix) for loc_,\n covariance_matrix in zip(locs, covariance_matrices)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [vmap_over(d, loc=locs) for d in self.\n component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {'rate': constraints.positive, 'gate_logits':\n constraints.real}\n pytree_data_fields = 'rate',\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), 
gate_logits, validate_args=\n            validate_args)\n\n\n<mask token>\n\n\nclass SparsePoisson(dist.Poisson):\n\n    def __init__(self, rate, *, validate_args=None):\n        super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\n\n    def __init__(self, loc, scale, validate_args=None):\n        self.loc = loc\n        self.scale = scale\n        super().__init__(dist.Normal(loc, scale), validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):\n    d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=\n        vmap_over(self.base_dist, loc=loc, scale=scale))\n    d.loc = loc\n    d.scale = scale\n    return d\n\n\nclass _SparseCAR(dist.CAR):\n    reparametrized_params = ['loc', 'correlation', 'conditional_precision']\n\n    def __init__(self, loc, correlation, conditional_precision, adj_matrix,\n        *, is_sparse=True, validate_args=None):\n        super().__init__(loc, correlation, conditional_precision,\n            adj_matrix, is_sparse=True, validate_args=validate_args)\n\n\n<mask token>\n\n\ndef get_sp_dist(jax_dist):\n    classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n    for cls in classes:\n        if cls in _DIST_MAP:\n            return _DIST_MAP[cls]\n\n\n<mask token>\n\n\ndef _is_batched_multivariate(jax_dist):\n    return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n    jax_dist = jax_dist(*params)\n    rng_key = random.PRNGKey(0)\n    expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.\n        event_shape)\n    samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n    assert isinstance(samples, jnp.ndarray)\n    assert jnp.shape(samples) == expected_shape\n    if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(\n        jax_dist, dist.MultivariateStudentT):\n        sp_dist = sp_dist(*params)\n        sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n        assert jnp.shape(sp_samples) == expected_shape\n    elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(\n        jax_dist, dist.MultivariateStudentT):\n        sp_dist = sp_dist(*params)\n        size_ = prepend_shape + jax_dist.batch_shape\n        size = 1 if size_ == () else size_\n        try:\n            sp_samples = sp_dist.rvs(size=size)\n        except ValueError:\n            pytest.skip(\n                \"scipy multivariate t doesn't support size with > 1 element\")\n        assert jnp.shape(sp_samples) == expected_shape\n    if isinstance(jax_dist, (dist.MultivariateNormal, dist.\n        MultivariateStudentT)):\n        assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n        assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.\n            covariance_matrix), rtol=1e-06)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n    shapes = tuple(getattr(p, 'shape', ()) for p in params)\n    shapes = tuple(x() if callable(x) else x for x in shapes)\n    jax_dist = jax_dist(*params)\n    try:\n        expected_batch_shape, expected_event_shape = type(jax_dist\n            ).infer_shapes(*shapes)\n    except NotImplementedError:\n        pytest.skip(\n            f'{type(jax_dist).__name__}.infer_shapes() is not implemented')\n    assert jax_dist.batch_shape == expected_batch_shape\n    assert jax_dist.event_shape == expected_event_shape\n\n\n@pytest.mark.parametrize
('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\ndef test_has_rsample(jax_dist, sp_dist, params):\n    jax_dist = jax_dist(*params)\n    masked_dist = jax_dist.mask(False)\n    indept_dist = jax_dist.expand_by([2]).to_event(1)\n    transf_dist = dist.TransformedDistribution(jax_dist, biject_to(\n        constraints.real))\n    assert masked_dist.has_rsample == jax_dist.has_rsample\n    assert indept_dist.has_rsample == jax_dist.has_rsample\n    assert transf_dist.has_rsample == jax_dist.has_rsample\n    if jax_dist.has_rsample:\n        assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n        if isinstance(jax_dist, dist.TransformedDistribution):\n            assert jax_dist.base_dist.has_rsample\n        else:\n            assert set(jax_dist.arg_constraints) == set(jax_dist.\n                reparametrized_params)\n        jax_dist.rsample(random.PRNGKey(0))\n        if isinstance(jax_dist, dist.Normal):\n            masked_dist.rsample(random.PRNGKey(0))\n            indept_dist.rsample(random.PRNGKey(0))\n            transf_dist.rsample(random.PRNGKey(0))\n    else:\n        with pytest.raises(NotImplementedError):\n            jax_dist.rsample(random.PRNGKey(0))\n        if isinstance(jax_dist, dist.BernoulliProbs):\n            with pytest.raises(NotImplementedError):\n                masked_dist.rsample(random.PRNGKey(0))\n            with pytest.raises(NotImplementedError):\n                indept_dist.rsample(random.PRNGKey(0))\n            with pytest.raises(NotImplementedError):\n                transf_dist.rsample(random.PRNGKey(0))\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n    gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [\n        'concentration1', 'concentration0'], 'BetaProportion': ['mean',\n        'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],\n        'InverseGamma': ['concentration'], 'LKJ': ['concentration'],\n        'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.\n        __name__, [])\n    dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1\n        :] if inspect.isclass(jax_dist) else inspect.getfullargspec(\n        jax_dist)[0])]\n    params_dict = dict(zip(dist_args[:len(params)], params))\n    jax_class = type(jax_dist(**params_dict))\n    reparametrized_params = [p for p in jax_class.reparametrized_params if \n        p not in gamma_derived_params]\n    if not reparametrized_params:\n        pytest.skip('{} not reparametrized.'.format(jax_class.__name__))\n    nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in\n        reparametrized_params}\n    repara_params = tuple(v for k, v in params_dict.items() if k in\n        reparametrized_params)\n    rng_key = random.PRNGKey(0)\n\n    def fn(args):\n        args_dict = dict(zip(reparametrized_params, args))\n        return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).\n            sample(key=rng_key))\n    actual_grad = jax.grad(fn)(repara_params)\n    assert len(actual_grad) == len(repara_params)\n    eps = 0.001\n    for i in range(len(repara_params)):\n        if repara_params[i] is None:\n            continue\n        args_lhs = [(p if j != i else p - eps) for j, p in enumerate(\n            repara_params)]\n        args_rhs = [(p if j != i else p + eps) for j, p in enumerate(\n            repara_params)]\n        fn_lhs = fn(args_lhs)\n        fn_rhs = fn(args_rhs)\n        expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n        assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n        assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,\n            atol=0.03)\n\n\n@pytest.mark.parametrize('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.\n    Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,\n    (0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,\n    (5.0, 2.0, 4.0))])\ndef
 test_pathwise_gradient(jax_dist, params):\n    rng_key = random.PRNGKey(0)\n    N = 1000000\n\n    def f(params):\n        z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n        return (z + z ** 2).mean(0)\n\n    def g(params):\n        d = jax_dist(*params)\n        return d.mean + d.variance + d.mean ** 2\n    actual_grad = grad(f)(params)\n    expected_grad = grad(g)(params)\n    assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\n@pytest.mark.parametrize('jit', [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n    jit_fn = _identity if not jit else jax.jit\n    jax_dist = jax_dist(*params)\n    rng_key = random.PRNGKey(0)\n    samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n    assert jax_dist.log_prob(samples\n        ).shape == prepend_shape + jax_dist.batch_shape\n    truncated_dists = (dist.LeftTruncatedDistribution, dist.\n        RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)\n    if sp_dist is None:\n        if isinstance(jax_dist, truncated_dists):\n            if isinstance(params[0], dist.Distribution):\n                loc, scale, low, high = params[0].loc, params[0].scale, params[\n                    1], params[2]\n            else:\n                loc, scale, low, high = params\n            if low is None:\n                low = -np.inf\n            if high is None:\n                high = np.inf\n            sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n            expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -\n                sp_dist.cdf(low))\n            assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,\n                atol=1e-05)\n            return\n        pytest.skip('no corresponding scipy distn.')\n    if _is_batched_multivariate(jax_dist):\n        pytest.skip('batching not allowed in multivariate distns.')\n    if jax_dist.event_shape and prepend_shape:\n        pytest.skip(\n            'batched samples cannot be scored by multivariate distributions.')\n    sp_dist = sp_dist(*params)\n    try:\n        expected = sp_dist.logpdf(samples)\n    except AttributeError:\n        expected = sp_dist.logpmf(samples)\n    except ValueError as e:\n        if \"The input vector 'x' must lie within the normal simplex.\" in str(e\n            ):\n            samples = jax.device_get(samples).astype('float64')\n            samples = samples / samples.sum(axis=-1, keepdims=True)\n            expected = sp_dist.logpdf(samples)\n        else:\n            raise e\n    assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)\n\n\ndef test_mixture_log_prob():\n    gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist\n        .Normal(0, 1).expand([2]))\n    actual = gmm.log_prob(0.0)\n    expected = dist.Normal(0, 1).log_prob(0.0)\n    assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.\n    Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])\n@pytest.mark.filterwarnings('ignore:overflow encountered:RuntimeWarning')\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n    d = jax_dist(*params)\n    if d.event_dim > 0:\n        pytest.skip(\n            'skip testing cdf/icdf methods of multivariate distributions')\n    samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n    quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n    try:\n        rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05\n        if d.shape() == () and not d.is_discrete:\n            assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.\n                log_prob(samples)), atol=1e-05, rtol=rtol)\n            assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(\n                -d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)\n        assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,\n
            rtol=1e-05)\n        assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)\n    except NotImplementedError:\n        pass\n    if not sp_dist:\n        pytest.skip('no corresponding scipy distn.')\n    sp_dist = sp_dist(*params)\n    try:\n        actual_cdf = d.cdf(samples)\n        expected_cdf = sp_dist.cdf(samples)\n        assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)\n        actual_icdf = d.icdf(quantiles)\n        expected_icdf = sp_dist.ppf(quantiles)\n        assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)\n    except NotImplementedError:\n        pass\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('dimension', [2, 3, 5])\n@pytest.mark.parametrize('concentration', [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n    d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')\n    beta_sample = d._beta.sample(random.PRNGKey(0))\n    beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n    partial_correlation = 2 * beta_sample - 1\n    affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n    sample = signed_stick_breaking_tril(partial_correlation)\n    inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2\n    inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(\n        partial_correlation)))\n    unconstrained = inv_tanh(partial_correlation)\n    corr_cholesky_logdet = biject_to(constraints.corr_cholesky\n        ).log_abs_det_jacobian(unconstrained, sample)\n    signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n    actual_log_prob = d.log_prob(sample)\n    expected_log_prob = (beta_log_prob - affine_logdet -\n        signed_stick_breaking_logdet)\n    assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)\n    assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06\n        )\n\n\ndef test_zero_inflated_logits_probs_agree():\n    concentration = np.exp(np.random.normal(1))\n    rate = np.exp(np.random.normal(1))\n    d = dist.GammaPoisson(concentration, rate)\n    gate_logits = np.random.normal(0)\n    gate_probs = expit(gate_logits)\n    zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n    zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n    sample = np.random.randint(0, 20, (1000, 100))\n    assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('total_count', [1, 2, 3, 10])\n@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n    concentration0 = np.exp(np.random.normal(size=shape))\n    concentration1 = np.exp(np.random.normal(size=shape))\n    value = jnp.arange(1 + total_count)\n    num_samples = 100000\n    probs = np.random.beta(concentration1, concentration0, size=(\n        num_samples,) + shape)\n    log_probs = dist.Binomial(total_count, probs).log_prob(value)\n    expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n    actual = dist.BetaBinomial(concentration1, concentration0, total_count\n        ).log_prob(value)\n    assert_allclose(actual, expected, rtol=0.02)\n\n\n@pytest.mark.parametrize('total_count', [1, 2, 3, 10])\n@pytest.mark.parametrize('batch_shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n    event_shape = 3,\n    concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n    value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1\n        ,) * len(batch_shape) + event_shape)\n    num_samples = 100000\n    probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (\n        num_samples, 1))\n    log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n    expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n    actual =
 dist.DirichletMultinomial(concentration, total_count).log_prob(\n        value)\n    assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n    gamma_conc = np.exp(np.random.normal(size=shape))\n    gamma_rate = np.exp(np.random.normal(size=shape))\n    value = jnp.arange(15)\n    num_samples = 300000\n    poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(\n        num_samples,) + shape)\n    log_probs = dist.Poisson(poisson_rate).log_prob(value)\n    expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n    actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n    assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n    if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n        pytest.skip('we have separated tests for LKJCholesky distribution')\n    if jax_dist is _ImproperWrapper:\n        pytest.skip(\n            'no param for ImproperUniform to test for log_prob gradient')\n    rng_key = random.PRNGKey(0)\n    value = jax_dist(*params).sample(rng_key)\n\n    def fn(*args):\n        return jnp.sum(jax_dist(*args).log_prob(value))\n    eps = 0.001\n    for i in range(len(params)):\n        if jax_dist is dist.EulerMaruyama and i == 1:\n            continue\n        if jax_dist is _SparseCAR and i == 3:\n            continue\n        if isinstance(params[i], dist.Distribution):\n            continue\n        if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,\n            jnp.int64):\n            continue\n        actual_grad = jax.grad(fn, i)(*params)\n        args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]\n        args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]\n        fn_lhs = fn(*args_lhs)\n        fn_rhs = fn(*args_rhs)\n        expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n        assert jnp.shape(actual_grad) == jnp.shape(params[i])\n        if i == 0 and jax_dist is dist.Delta:\n            expected_grad = 0.0\n        assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,\n            atol=0.01)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\n@pytest.mark.parametrize('prepend_shape', [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n    if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,\n        _Gaussian2DMixture, _GeneralMixture, _General2DMixture):\n        pytest.skip(f'{jax_dist.__name__} is a function, not a class')\n    dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n    valid_params, oob_params = list(params), list(params)\n    key = random.PRNGKey(1)\n    dependent_constraint = False\n    for i in range(len(params)):\n        if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky\n            ) and dist_args[i] != 'concentration':\n            continue\n        if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':\n            continue\n        if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':\n            continue\n        if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i\n            ] == 'base_dist':\n            continue\n        if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':\n            continue\n        if jax_dist is dist.SineBivariateVonMises and dist_args[i\n            ] == 'weighted_correlation':\n            continue\n        if params[i] is None:\n            oob_params[i] = None\n            valid_params[i] = None\n            continue\n        constraint = jax_dist.arg_constraints[dist_args[i]]\n        if isinstance(constraint, constraints._Dependent):\n            dependent_constraint = True\n            break\n        key, key_gen = random.split(key)\n        oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(\n
            params[i]), key_gen)\n        valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(\n            params[i]), key_gen)\n        if jax_dist is dist.MultivariateStudentT:\n            valid_params[0] += 1\n        if jax_dist is dist.LogUniform:\n            valid_params[1] += valid_params[0]\n    assert jax_dist(*oob_params)\n    if not dependent_constraint and (jax_dist is not _ImproperWrapper and \n        'SineSkewed' not in jax_dist.__name__):\n        with pytest.raises(ValueError):\n            jax_dist(*oob_params, validate_args=True)\n        with pytest.raises(ValueError):\n            oob_params = jax.device_get(oob_params)\n\n            def dist_gen_fn():\n                d = jax_dist(*oob_params, validate_args=True)\n                return d\n            jax.jit(dist_gen_fn)()\n    d = jax_dist(*valid_params, validate_args=True)\n    if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and\n        prepend_shape):\n        valid_samples = gen_values_within_bounds(d.support, size=\n            prepend_shape + d.batch_shape + d.event_shape)\n        try:\n            expected = sp_dist(*valid_params).logpdf(valid_samples)\n        except AttributeError:\n            expected = sp_dist(*valid_params).logpmf(valid_samples)\n        assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,\n            rtol=1e-05)\n    oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +\n        d.batch_shape + d.event_shape)\n    with pytest.warns(UserWarning, match='Out-of-support'):\n        d.log_prob(oob_samples)\n    with pytest.warns(UserWarning, match='Out-of-support'):\n        oob_samples = jax.device_get(oob_samples)\n        valid_params = jax.device_get(valid_params)\n\n        def log_prob_fn():\n            d = jax_dist(*valid_params, validate_args=True)\n            return d.log_prob(oob_samples)\n        jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n\n    def f(x):\n        return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n    with pytest.raises(ValueError, match='got invalid'):\n        jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n\n    def f(x):\n        return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n    with pytest.warns(UserWarning, match='Out-of-support'):\n        jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n    data = jnp.repeat(jnp.arange(3), 10)\n\n    def f(x):\n        return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(\n            data).sum()\n\n    def g(x):\n        return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data\n            ).sum()\n    x = 0.5\n    fx, grad_fx = jax.value_and_grad(f)(x)\n    gx, grad_gx = jax.value_and_grad(g)(x)\n    assert_allclose(fx, gx, rtol=1e-06)\n    assert_allclose(grad_fx, grad_gx, atol=0.0001)\n\n\ndef test_beta_proportion_invalid_mean():\n    with dist.distribution.validation_enabled(), pytest.raises(ValueError,\n        match='^BetaProportion distribution got invalid mean parameter\\\\.$'):\n        dist.BetaProportion(1.0, 1.0)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('constraint', [constraints.corr_cholesky,\n    constraints.corr_matrix, constraints.greater_than(2), constraints.\n    interval(-3, 5), constraints.l1_ball, constraints.less_than(1),\n    constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,\n    constraints.ordered_vector, constraints.positive, constraints.\n    positive_definite, constraints.positive_ordered_vector, constraints.\n    real, constraints.real_vector, constraints.simplex, constraints.\n    softplus_positive, constraints.softplus_lower_cholesky, constraints.\n    unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.\n    __class__)\n@pytest.mark.parametrize('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,\n    3)])\ndef test_biject_to(constraint, shape):\n    transform = biject_to(constraint)\n    event_dim = transform.domain.event_dim\n    if
 isinstance(constraint, constraints._Interval):\n        assert transform.codomain.upper_bound == constraint.upper_bound\n        assert transform.codomain.lower_bound == constraint.lower_bound\n    elif isinstance(constraint, constraints._GreaterThan):\n        assert transform.codomain.lower_bound == constraint.lower_bound\n    elif isinstance(constraint, constraints._LessThan):\n        assert transform.codomain.upper_bound == constraint.upper_bound\n    if len(shape) < event_dim:\n        return\n    rng_key = random.PRNGKey(0)\n    x = random.normal(rng_key, shape)\n    y = transform(x)\n    assert transform.forward_shape(x.shape) == y.shape\n    assert transform.inverse_shape(y.shape) == x.shape\n    x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n    assert x_nan.shape == x.shape\n    batch_shape = shape if event_dim == 0 else shape[:-1]\n    assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=\n        jnp.bool_))\n    z = transform.inv(y)\n    assert_allclose(x, z, atol=1e-05, rtol=1e-05)\n    assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n    actual = transform.log_abs_det_jacobian(x, y)\n    assert jnp.shape(actual) == batch_shape\n    if len(shape) == event_dim:\n        if constraint is constraints.simplex:\n            expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n            inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)\n                [:, :-1])[1]\n        elif constraint in [constraints.real_vector, constraints.\n            ordered_vector, constraints.positive_ordered_vector,\n            constraints.l1_ball]:\n            expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n            inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n        elif constraint in [constraints.corr_cholesky, constraints.corr_matrix\n            ]:\n            vec_transform = lambda x: matrix_to_tril_vec(transform(x),\n                diagonal=-1)\n            y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n            def inv_vec_transform(y):\n                matrix = vec_to_tril_matrix(y, diagonal=-1)\n                if constraint is constraints.corr_matrix:\n                    matrix = matrix + jnp.swapaxes(matrix, -2, -1\n                        ) + jnp.identity(matrix.shape[-1])\n                return transform.inv(matrix)\n            expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n            inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n                )(y_tril))[1]\n        elif constraint in [constraints.lower_cholesky, constraints.\n            scaled_unit_lower_cholesky, constraints.positive_definite,\n            constraints.softplus_lower_cholesky]:\n            vec_transform = lambda x: matrix_to_tril_vec(transform(x))\n            y_tril = matrix_to_tril_vec(y)\n\n            def inv_vec_transform(y):\n                matrix = vec_to_tril_matrix(y)\n                if constraint is constraints.positive_definite:\n                    matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(\n                        jnp.diag(matrix))\n                return transform.inv(matrix)\n            expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n            inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n                )(y_tril))[1]\n        else:\n            expected = jnp.log(jnp.abs(grad(transform)(x)))\n            inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n        assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)\n        assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)\n\n\n@pytest.mark.parametrize('transform, event_shape', [(PermuteTransform(np.\n    array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (\n    SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np\n    .array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(\n    [biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),\n    biject_to(constraints.ordered_vector).inv]), (5,))])\n@pytest.mark.parametrize('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1, \n    3), (5, 3)])\ndef
 test_bijective_transforms(transform, event_shape, batch_shape):\n    shape = batch_shape + event_shape\n    rng_key = random.PRNGKey(0)\n    x = biject_to(transform.domain)(random.normal(rng_key, shape))\n    y = transform(x)\n    assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n    z = transform.inv(y)\n    assert_allclose(x, z, atol=1e-06, rtol=0.0001)\n    assert transform.inv.inv is transform\n    assert transform.inv is transform.inv\n    assert transform.domain is transform.inv.codomain\n    assert transform.codomain is transform.inv.domain\n    assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n    actual = transform.log_abs_det_jacobian(x, y)\n    assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n    assert jnp.shape(actual) == batch_shape\n    if len(shape) == transform.domain.event_dim:\n        if len(event_shape) == 1:\n            expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n            inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n        else:\n            expected = jnp.log(jnp.abs(grad(transform)(x)))\n            inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n        assert_allclose(actual, expected, atol=1e-06)\n        assert_allclose(actual, -inv_expected, atol=1e-06)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n    t1 = transforms.AffineTransform(0, 2)\n    t2 = transforms.LowerCholeskyTransform()\n    t = transforms.ComposeTransform([t1, t2, t2])\n    assert t.domain.event_dim == 1\n    assert t.codomain.event_dim == 3\n    x = np.random.normal(size=batch_shape + (6,))\n    y = t(x)\n    log_det = t.log_abs_det_jacobian(x, y)\n    assert log_det.shape == batch_shape\n    z = t2(x * 2)\n    expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z\n        ) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n    assert_allclose(log_det, expected_log_det)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n    simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n    simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n    transform = SimplexToOrderedTransform()\n    out = transform(simplex)\n    assert out.shape == transform.forward_shape(simplex.shape)\n    assert simplex.shape == transform.inverse_shape(out.shape)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('transformed_dist', [dist.TransformedDistribution(\n    dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),\n    dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms\n    .PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])\ndef test_transformed_distribution_intermediates(transformed_dist):\n    sample, intermediates = transformed_dist.sample_with_intermediates(random\n        .PRNGKey(1))\n    assert_allclose(transformed_dist.log_prob(sample, intermediates),\n        transformed_dist.log_prob(sample))\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=\n    100000, key=random.PRNGKey(11)):\n    \"\"\"On samplers that we do not get directly from JAX, (e.g.
 we only get\n    Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n    agreement in the empirical distribution of generated samples between our\n    samplers and those from SciPy.\n    \"\"\"\n    if jax_dist not in [dist.Gumbel]:\n        pytest.skip(\n            '{} sampling method taken from upstream, no need to test generated samples.'\n            .format(jax_dist.__name__))\n    jax_dist = jax_dist(*params)\n    if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n        our_samples = jax_dist.sample(key, (N_sample,))\n        ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n        assert ks_result.pvalue > 0.05\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)\n@pytest.mark.parametrize('prepend_shape', [(), (2, 3)])\n@pytest.mark.parametrize('sample_shape', [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n    jax_dist = jax_dist(*params)\n    new_batch_shape = prepend_shape + jax_dist.batch_shape\n    expanded_dist = jax_dist.expand(new_batch_shape)\n    rng_key = random.PRNGKey(0)\n    samples = expanded_dist.sample(rng_key, sample_shape)\n    assert expanded_dist.batch_shape == new_batch_shape\n    assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n    assert expanded_dist.log_prob(samples\n        ).shape == sample_shape + new_batch_shape\n    assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,\n        ) + new_batch_shape\n    if prepend_shape:\n        with pytest.raises(ValueError, match=\n            'Cannot broadcast distribution of shape'):\n            assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)\n    ), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n    loc = jnp.zeros((1, 6))\n    scale_tril = jnp.eye(6)\n    d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n    full_shape = 4, 1, 1, 1, 6\n    reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n    cut = 4 - extra_event_dims\n    batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n    assert reshaped_dist.batch_shape == batch_shape\n    assert reshaped_dist.event_shape == event_shape\n    large = reshaped_dist.expand(expand_shape)\n    assert large.batch_shape == expand_shape\n    assert large.event_shape == event_shape\n    with pytest.raises((RuntimeError, ValueError)):\n        reshaped_dist.expand(expand_shape + (3,))\n    with pytest.raises((RuntimeError, ValueError)):\n        large.expand(expand_shape[1:])\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n\n    def f(x, data):\n        base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n        mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(\n            event_shape))))\n        log_prob = base_dist.mask(mask).log_prob(data)\n        assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)\n            ]\n        return log_prob.sum()\n    data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n    log_prob, grad = jax.value_and_grad(f)(1.0, data)\n    assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\n<mask token>\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n    for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n        d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n        roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n        assert d.batch_shape == roundtripped_d.batch_shape\n        assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n        assert d.base_dist.event_shape ==
 roundtripped_d.base_dist.event_shape\n        assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n        assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n        def bs(arg):\n            return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n        d = bs(arg)\n        dj = jax.jit(bs)(arg)\n        assert isinstance(d, dist.ExpandedDistribution)\n        assert isinstance(dj, dist.ExpandedDistribution)\n        assert d.batch_shape == dj.batch_shape\n        assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n        assert d.base_dist.event_shape == dj.base_dist.event_shape\n        assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n        assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n    v = np.random.normal(size=batch_shape)\n    loc = np.random.normal(size=batch_shape)\n    scale = np.exp(np.random.normal(size=batch_shape))\n    p = dist.Delta(v)\n    q = dist.Normal(loc, scale)\n    assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n    v = np.random.normal()\n    loc = np.random.normal()\n    scale = np.exp(np.random.normal())\n    p = dist.Delta(v, 10.0)\n    q = dist.Normal(loc, scale)\n    assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n    shape = batch_shape + event_shape\n    p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n        size=shape)))\n    q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n        size=shape)))\n    actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n        Independent(q, len(event_shape)))\n    expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n    assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_expanded_normal(batch_shape, event_shape):\n    shape = batch_shape + event_shape\n    p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n        shape)\n    q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n        shape)\n    actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n        Independent(q, len(event_shape)))\n    expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n    assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('shape', [(), (4,), (2, 3)], ids=str)\n@pytest.mark.parametrize('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.\n    Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.\n    Normal), (dist.Weibull, dist.Gamma)])\ndef test_kl_univariate(shape, p_dist, q_dist):\n\n    def make_dist(dist_class):\n        params = {}\n        for k, c in dist_class.arg_constraints.items():\n            if c is constraints.real:\n                params[k] = np.random.normal(size=shape)\n            elif c is constraints.positive:\n                params[k] = np.exp(np.random.normal(size=shape))\n            else:\n                raise ValueError(f'Missing pattern for param {k}.')\n        d = dist_class(**params)\n        if dist_class is dist.Kumaraswamy:\n            d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n        return d\n    p = make_dist(p_dist)\n    q = make_dist(q_dist)\n    actual = kl_divergence(p, q)\n    x = p.sample(random.PRNGKey(0), (10000,)).copy()\n    expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n    assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('shape', [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n    p =
 dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n    q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n    actual = kl_divergence(p, q)\n    x = p.sample(random.PRNGKey(0), (10000,)).copy()\n    expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n    assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n\n    def sample_binomial_withp0(key):\n        n = 2 * (random.uniform(key) > 0.5)\n        _, key = random.split(key)\n        return dist.Binomial(total_count=n, probs=0).sample(key)\n    jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\n<mask token>\n\n\ndef _tree_equal(t1, t2):\n    t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n    return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n    param_names = list(inspect.signature(jax_dist).parameters.keys())\n    vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n    vmappable_param_idxs = vmappable_param_idxs[:len(params)]\n    if len(vmappable_param_idxs) == 0:\n        return\n\n    def make_jax_dist(*params):\n        return jax_dist(*params)\n\n    def sample(d: dist.Distribution):\n        return d.sample(random.PRNGKey(0))\n    d = make_jax_dist(*params)\n    if isinstance(d, _SparseCAR) and d.is_sparse:\n        return\n    in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for\n        i in range(len(params))), 0), *(([(0 if i == idx else None) for i in\n        range(len(params))], 0) for idx in vmappable_param_idxs if params[\n        idx] is not None), *(([(0 if i == idx else None) for i in range(len\n        (params))], vmap_over(d, **{param_names[idx]: 0})) for idx in\n        vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==\n        idx else None) for i in range(len(params))], vmap_over(d, **{\n        param_names[idx]: 1})) for idx in vmappable_param_idxs if \n        isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).\n        ndim > 0 and jax_dist is not _GeneralMixture)]\n    for in_axes, out_axes in in_out_axes_cases:\n        batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),\n            arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,\n            in_axes)]\n        d = make_jax_dist(*params)\n        batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes\n            )(*batched_params)\n        eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n            batched_d, d)\n        assert eq == jnp.array([True])\n        samples_dist = sample(d)\n        samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n        assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n    probs = jnp.array([0.2, 0.5, 0.3])\n    key = random.PRNGKey(0)\n\n    def f(x):\n        total_count = x.sum(-1)\n        return dist.Multinomial(total_count, probs=probs, total_count_max=10\n            ).sample(key)\n    x = dist.Multinomial(10, probs).sample(key)\n    y = jax.jit(f)(x)\n    assert_allclose(x, y, rtol=1e-06)\n\n\ndef test_normal_log_cdf():\n    loc = jnp.array([[0.0, -10.0, 20.0]])\n    scale = jnp.array([[1, 5, 7]])\n    values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n    numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n    numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n    jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n    assert_allclose(numpyro_log_cdf, jax_log_cdf)\n    assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)\n\n\n@pytest.mark.parametrize('value', [-15.0, jnp.array([[-15.0], [-10.0], [-\n    5.0]]), jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]])]\n    )\ndef
 test_truncated_normal_log_prob_in_tail(value):\n    loc = 1.35\n    scale = jnp.geomspace(0.01, 1, 10)\n    low, high = -20, -1.0\n    a, b = (low - loc) / scale, (high - loc) / scale\n    numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high\n        ).log_prob(value)\n    jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)\n    assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)\n\n\ndef test_sample_truncated_normal_in_tail():\n    tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n    samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))\n    assert ~jnp.isinf(samples).any()\n\n\n@jax.enable_custom_prng()\ndef test_jax_custom_prng():\n    samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n    assert ~jnp.isinf(samples).any()\n",
"step-3": "<mask token>\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\n<mask token>\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\n<mask token>\n\n\nclass T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):\n\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\ndef _mvn_to_scipy(loc, cov, prec, tril):\n jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < '1.6.0':\n pytest.skip(\n 'Multivariate Student-T distribution is not available in scipy < 1.6'\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\n<mask token>\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\ndef _TruncatedCauchy(loc, scale, low, high):\n return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)\n\n\n<mask token>\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,\n math.pi])\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0]), np.array([1.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n<mask token>\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(self:\n SineSkewedVonMisesBatched, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n 
component_distribution = vmap_over(self.component_distribution, loc=loc,\n scale=scale)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix\n =covariance_matrix)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, scales):\n component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,\n scale_ in zip(locs, scales)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in\n self.component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [dist.MultivariateNormal(loc=loc_,\n covariance_matrix=covariance_matrix) for loc_,\n covariance_matrix in zip(locs, covariance_matrices)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [vmap_over(d, loc=locs) for d in self.\n component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {'rate': constraints.positive, 'gate_logits':\n constraints.real}\n 
pytree_data_fields = 'rate',\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), gate_logits, validate_args=\n validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_zero_inflated_poisson_logits(self: ZeroInflatedPoissonLogits,\n rate=None, gate_logits=None):\n dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(self,\n base_dist=vmap_over(self.base_dist, rate=rate), gate_logits=\n gate_logits, gate=gate_logits)\n dist_axes.rate = rate\n return dist_axes\n\n\nclass SparsePoisson(dist.Poisson):\n\n def __init__(self, rate, *, validate_args=None):\n super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=\n vmap_over(self.base_dist, loc=loc, scale=scale))\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = ['loc', 'correlation', 'conditional_precision']\n\n def __init__(self, loc, correlation, conditional_precision, adj_matrix,\n *, is_sparse=True, validate_args=None):\n super().__init__(loc, correlation, conditional_precision,\n adj_matrix, is_sparse=True, validate_args=validate_args)\n\n\n<mask token>\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\n<mask token>\n\n\ndef _is_batched_multivariate(jax_dist):\n return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\ndef gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):\n eps = 1e-06\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size)\n elif isinstance(constraint, constraints.greater_than):\n return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.randint(key, size, lower_bound, upper_bound + 1)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound + random.poisson(key, np.array(5),\n shape=size)\n elif isinstance(constraint, constraints.interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=lower_bound, maxval=upper_bound\n )\n elif constraint in (constraints.real, constraints.real_vector):\n return random.normal(key, size)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.\n upper_bound, shape=size[:-1])\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (\n size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))\n elif constraint is constraints.corr_matrix:\n cholesky = 
signed_stick_breaking_tril(random.uniform(key, size[:-2] +\n (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return jnp.tril(random.uniform(key, size))\n elif constraint is constraints.positive_definite:\n x = random.normal(key, size)\n return jnp.matmul(x, jnp.swapaxes(x, -2, -1))\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x - random.normal(key, size[:-1] + (1,))\n elif isinstance(constraint, constraints.independent):\n return gen_values_within_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n return x / jnp.linalg.norm(x, axis=-1)\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [0, (-1) ** sign * 0.5]\n return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError('{} not implemented.'.format(constraint))\n\n\ndef gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size) - 2\n elif isinstance(constraint, constraints.greater_than):\n return constraint.lower_bound - jnp.exp(random.normal(key, size))\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n return random.randint(key, size, lower_bound - 1, lower_bound)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound - random.poisson(key, np.array(5),\n shape=size)\n elif isinstance(constraint, constraints.interval):\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=upper_bound, maxval=\n upper_bound + 1.0)\n elif constraint in [constraints.real, constraints.real_vector]:\n return lax.full(size, np.nan)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]\n ) + 0.01\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.\n upper_bound, shape=size[:-1]) + 1\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (\n size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)) + 0.01\n elif constraint is constraints.corr_matrix:\n cholesky = 0.01 + signed_stick_breaking_tril(random.uniform(key, \n size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)\n )\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return random.uniform(key, size)\n elif constraint is constraints.positive_definite:\n return random.normal(key, size)\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x[..., ::-1]\n elif isinstance(constraint, constraints.independent):\n return gen_values_outside_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)\n return 2 * x\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]\n return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError('{} not 
implemented.'.format(constraint))\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.\n event_shape)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = 1 if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\n \"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.\n MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.\n covariance_matrix), rtol=1e-06)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, 'shape', ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = jax_dist(*params)\n try:\n expected_batch_shape, expected_event_shape = type(jax_dist\n ).infer_shapes(*shapes)\n except NotImplementedError:\n pytest.skip(\n f'{type(jax_dist).__name__}.infer_shapes() is not implemented')\n assert jax_dist.batch_shape == expected_batch_shape\n assert jax_dist.event_shape == expected_event_shape\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_has_rsample(jax_dist, sp_dist, params):\n jax_dist = jax_dist(*params)\n masked_dist = jax_dist.mask(False)\n indept_dist = jax_dist.expand_by([2]).to_event(1)\n transf_dist = dist.TransformedDistribution(jax_dist, biject_to(\n constraints.real))\n assert masked_dist.has_rsample == jax_dist.has_rsample\n assert indept_dist.has_rsample == jax_dist.has_rsample\n assert transf_dist.has_rsample == jax_dist.has_rsample\n if jax_dist.has_rsample:\n assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n if isinstance(jax_dist, dist.TransformedDistribution):\n assert jax_dist.base_dist.has_rsample\n else:\n assert set(jax_dist.arg_constraints) == set(jax_dist.\n reparametrized_params)\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.Normal):\n masked_dist.rsample(random.PRNGKey(0))\n indept_dist.rsample(random.PRNGKey(0))\n transf_dist.rsample(random.PRNGKey(0))\n else:\n with pytest.raises(NotImplementedError):\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.BernoulliProbs):\n with pytest.raises(NotImplementedError):\n masked_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n indept_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n transf_dist.rsample(random.PRNGKey(0))\n\n\[email protected]('batch_shape', [(), (4,), (3, 2)])\ndef 
test_unit(batch_shape):\n log_factor = random.normal(random.PRNGKey(0), batch_shape)\n d = dist.Unit(log_factor=log_factor)\n x = d.sample(random.PRNGKey(1))\n assert x.shape == batch_shape + (0,)\n assert (d.log_prob(x) == log_factor).all()\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [\n 'concentration1', 'concentration0'], 'BetaProportion': ['mean',\n 'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],\n 'InverseGamma': ['concentration'], 'LKJ': ['concentration'],\n 'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.\n __name__, [])\n dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1\n :] if inspect.isclass(jax_dist) else inspect.getfullargspec(\n jax_dist)[0])]\n params_dict = dict(zip(dist_args[:len(params)], params))\n jax_class = type(jax_dist(**params_dict))\n reparametrized_params = [p for p in jax_class.reparametrized_params if \n p not in gamma_derived_params]\n if not reparametrized_params:\n pytest.skip('{} not reparametrized.'.format(jax_class.__name__))\n nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in\n reparametrized_params}\n repara_params = tuple(v for k, v in params_dict.items() if k in\n reparametrized_params)\n rng_key = random.PRNGKey(0)\n\n def fn(args):\n args_dict = dict(zip(reparametrized_params, args))\n return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).\n sample(key=rng_key))\n actual_grad = jax.grad(fn)(repara_params)\n assert len(actual_grad) == len(repara_params)\n eps = 0.001\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(\n repara_params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(\n repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,\n atol=0.03)\n\n\[email protected]('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.\n Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,\n (0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,\n (5.0, 2.0, 4.0))])\ndef test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n return (z + z ** 2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean ** 2\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\[email protected]('jit', [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples\n ).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (dist.LeftTruncatedDistribution, dist.\n RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], 
dist.Distribution):\n            loc, scale, low, high = params[0].loc, params[0].scale, params[\n                1], params[2]\n        else:\n            loc, scale, low, high = params\n        if low is None:\n            low = -np.inf\n        if high is None:\n            high = np.inf\n        sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n        expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -\n            sp_dist.cdf(low))\n        assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,\n            atol=1e-05)\n        return\n    pytest.skip('no corresponding scipy distn.')\n    if _is_batched_multivariate(jax_dist):\n        pytest.skip('batching not allowed in multivariate distns.')\n    if jax_dist.event_shape and prepend_shape:\n        pytest.skip(\n            'batched samples cannot be scored by multivariate distributions.')\n    sp_dist = sp_dist(*params)\n    try:\n        expected = sp_dist.logpdf(samples)\n    except AttributeError:\n        expected = sp_dist.logpmf(samples)\n    except ValueError as e:\n        if \"The input vector 'x' must lie within the normal simplex.\" in str(e\n            ):\n            samples = jax.device_get(samples).astype('float64')\n            samples = samples / samples.sum(axis=-1, keepdims=True)\n            expected = sp_dist.logpdf(samples)\n        else:\n            raise e\n    assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)\n\n\ndef test_mixture_log_prob():\n    gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist\n        .Normal(0, 1).expand([2]))\n    actual = gmm.log_prob(0.0)\n    expected = dist.Normal(0, 1).log_prob(0.0)\n    assert_allclose(actual, expected)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.\n    Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])\[email protected]('ignore:overflow encountered:RuntimeWarning')\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n    d = jax_dist(*params)\n    if d.event_dim > 0:\n        pytest.skip(\n            'skip testing cdf/icdf methods of multivariate distributions')\n    samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n    quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n    try:\n        rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05\n        if d.shape() == () and not d.is_discrete:\n            assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.\n                log_prob(samples)), atol=1e-05, rtol=rtol)\n            assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(\n                -d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)\n        assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,\n            rtol=1e-05)\n        assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)\n    except NotImplementedError:\n        pass\n    if not sp_dist:\n        pytest.skip('no corresponding scipy distn.')\n    sp_dist = sp_dist(*params)\n    try:\n        actual_cdf = d.cdf(samples)\n        expected_cdf = sp_dist.cdf(samples)\n        assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)\n        actual_icdf = d.icdf(quantiles)\n        expected_icdf = sp_dist.ppf(quantiles)\n        assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)\n    except NotImplementedError:\n        pass\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DIRECTIONAL)\ndef test_gof(jax_dist, sp_dist, params):\n    if 'Improper' in jax_dist.__name__:\n        pytest.skip('distribution has improper .log_prob()')\n    if 'LKJ' in jax_dist.__name__:\n        pytest.xfail('incorrect submanifold scaling')\n    if jax_dist is dist.EulerMaruyama:\n        d = jax_dist(*params)\n        if d.event_dim > 1:\n            pytest.skip(\n                'Skip EulerMaruyama test when event shape is non-trivial.')\n    num_samples = 10000\n    if 'BetaProportion' in jax_dist.__name__:\n        num_samples = 20000\n    rng_key = random.PRNGKey(0)\n    d = jax_dist(*params)\n    samples = 
d.sample(key=rng_key, sample_shape=(num_samples,))\n probs = np.exp(d.log_prob(samples))\n dim = None\n if jax_dist is dist.ProjectedNormal:\n dim = samples.shape[-1] - 1\n probs = probs.reshape(num_samples, -1)\n samples = samples.reshape(probs.shape + d.event_shape)\n if 'Dirichlet' in jax_dist.__name__:\n samples = samples[..., :-1]\n for b in range(probs.shape[1]):\n try:\n gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)\n except InvalidTest:\n pytest.skip('expensive test')\n else:\n assert gof > TEST_FAILURE_RATE\n\n\n<mask token>\n\n\ndef _tril_cholesky_to_tril_corr(x):\n w = vec_to_tril_matrix(x, diagonal=-1)\n diag = jnp.sqrt(1 - jnp.sum(w ** 2, axis=-1))\n cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])\n corr = jnp.matmul(cholesky, cholesky.T)\n return matrix_to_tril_vec(corr, diagonal=-1)\n\n\[email protected]('dimension', [2, 3, 5])\ndef test_log_prob_LKJCholesky_uniform(dimension):\n d = dist.LKJCholesky(dimension=dimension, concentration=1)\n N = 5\n corr_log_prob = []\n for i in range(N):\n sample = d.sample(random.PRNGKey(i))\n log_prob = d.log_prob(sample)\n sample_tril = matrix_to_tril_vec(sample, diagonal=-1)\n cholesky_to_corr_jac = np.linalg.slogdet(jax.jacobian(\n _tril_cholesky_to_tril_corr)(sample_tril))[1]\n corr_log_prob.append(log_prob - cholesky_to_corr_jac)\n corr_log_prob = np.array(corr_log_prob)\n assert_allclose(corr_log_prob, jnp.broadcast_to(corr_log_prob[0],\n corr_log_prob.shape), rtol=1e-06)\n if dimension == 2:\n assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-06)\n\n\[email protected]('dimension', [2, 3, 5])\[email protected]('concentration', [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(\n partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = biject_to(constraints.corr_cholesky\n ).log_abs_det_jacobian(unconstrained, sample)\n signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = (beta_log_prob - affine_logdet -\n signed_stick_breaking_logdet)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06\n )\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(0, 20, (1000, 100))\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n<mask token>\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n concentration0 = np.exp(np.random.normal(size=shape))\n concentration1 = np.exp(np.random.normal(size=shape))\n value = jnp.arange(1 + total_count)\n num_samples = 
100000\n    probs = np.random.beta(concentration1, concentration0, size=(\n        num_samples,) + shape)\n    log_probs = dist.Binomial(total_count, probs).log_prob(value)\n    expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n    actual = dist.BetaBinomial(concentration1, concentration0, total_count\n        ).log_prob(value)\n    assert_allclose(actual, expected, rtol=0.02)\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('batch_shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n    event_shape = 3,\n    concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n    value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1\n        ,) * len(batch_shape) + event_shape)\n    num_samples = 100000\n    probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (\n        num_samples, 1))\n    log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n    expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n    actual = dist.DirichletMultinomial(concentration, total_count).log_prob(\n        value)\n    assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n    gamma_conc = np.exp(np.random.normal(size=shape))\n    gamma_rate = np.exp(np.random.normal(size=shape))\n    value = jnp.arange(15)\n    num_samples = 300000\n    poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(\n        num_samples,) + shape)\n    log_probs = dist.Poisson(poisson_rate).log_prob(value)\n    expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n    actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n    assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n    if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n        pytest.skip('we have separated tests for LKJCholesky distribution')\n    if jax_dist is _ImproperWrapper:\n        pytest.skip(\n            'no param for ImproperUniform to test for log_prob gradient')\n    rng_key = random.PRNGKey(0)\n    value = jax_dist(*params).sample(rng_key)\n\n    def fn(*args):\n        return jnp.sum(jax_dist(*args).log_prob(value))\n    eps = 0.001\n    for i in range(len(params)):\n        if jax_dist is dist.EulerMaruyama and i == 1:\n            continue\n        if jax_dist is _SparseCAR and i == 3:\n            continue\n        if isinstance(params[i], dist.Distribution):\n            continue\n        if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,\n            jnp.int64):\n            continue\n        actual_grad = jax.grad(fn, i)(*params)\n        args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]\n        args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]\n        fn_lhs = fn(*args_lhs)\n        fn_rhs = fn(*args_rhs)\n        expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n        assert jnp.shape(actual_grad) == jnp.shape(params[i])\n        if i == 0 and jax_dist is dist.Delta:\n            expected_grad = 0.0\n        assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,\n            atol=0.01)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\ndef test_mean_var(jax_dist, sp_dist, params):\n    if jax_dist is _ImproperWrapper:\n        pytest.skip('Improper distribution does not have mean/var implemented')\n    if jax_dist is FoldedNormal:\n        pytest.skip('Folded distribution does not have mean/var implemented')\n    if jax_dist is dist.EulerMaruyama:\n        pytest.skip(\n            'EulerMaruyama distribution does not have mean/var implemented')\n    if jax_dist is dist.RelaxedBernoulliLogits:\n        pytest.skip(\n            'RelaxedBernoulli 
distribution does not have mean/var implemented')\n    if 'SineSkewed' in jax_dist.__name__:\n        pytest.skip('Skewed distributions are not symmetric about location.')\n    if jax_dist in (_TruncatedNormal, _TruncatedCauchy, dist.\n        LeftTruncatedDistribution, dist.RightTruncatedDistribution, dist.\n        TwoSidedTruncatedDistribution):\n        pytest.skip('Truncated distributions do not have mean/var implemented')\n    if jax_dist is dist.ProjectedNormal:\n        pytest.skip('Mean is defined in submanifold')\n    n = 20000 if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.\n        SineBivariateVonMises] else 200000\n    d_jax = jax_dist(*params)\n    k = random.PRNGKey(0)\n    samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)\n    if sp_dist and not _is_batched_multivariate(d_jax) and jax_dist not in [\n        dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]:\n        d_sp = sp_dist(*params)\n        try:\n            sp_mean = d_sp.mean()\n        except TypeError:\n            sp_mean = d_sp.mean\n        if d_jax.event_shape:\n            try:\n                sp_var = jnp.diag(d_sp.cov())\n            except TypeError:\n                sp_var = jnp.diag(d_sp.cov)\n            except AttributeError:\n                sp_var = d_sp.var()\n        else:\n            sp_var = d_sp.var()\n        assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-07)\n        assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-07)\n        if jnp.all(jnp.isfinite(sp_mean)):\n            assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,\n                atol=0.01)\n        if jnp.all(jnp.isfinite(sp_var)):\n            assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),\n                rtol=0.05, atol=0.01)\n    elif jax_dist in [dist.LKJ, dist.LKJCholesky]:\n        if jax_dist is dist.LKJCholesky:\n            corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))\n        else:\n            corr_samples = samples\n        dimension, concentration, _ = params\n        marginal = dist.Beta(concentration + 0.5 * (dimension - 2), \n            concentration + 0.5 * (dimension - 2))\n        marginal_mean = 2 * marginal.mean - 1\n        marginal_std = 2 * jnp.sqrt(marginal.variance)\n        expected_mean = jnp.broadcast_to(jnp.reshape(marginal_mean, jnp.\n            shape(marginal_mean) + (1, 1)), jnp.shape(marginal_mean) +\n            d_jax.event_shape)\n        expected_std = jnp.broadcast_to(jnp.reshape(marginal_std, jnp.shape\n            (marginal_std) + (1, 1)), jnp.shape(marginal_std) + d_jax.\n            event_shape)\n        expected_mean = expected_mean * (1 - jnp.identity(dimension)\n            ) + jnp.identity(dimension)\n        expected_std = expected_std * (1 - jnp.identity(dimension))\n        assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol\n            =0.01)\n        assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)\n    elif jax_dist in [dist.VonMises]:\n        assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=0.01)\n        x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)\n        expected_variance = 1 - jnp.sqrt(x ** 2 + y ** 2)\n        assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=0.01\n            )\n    elif jax_dist in [dist.SineBivariateVonMises]:\n        phi_loc = _circ_mean(samples[..., 0])\n        psi_loc = _circ_mean(samples[..., 1])\n        assert_allclose(d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1),\n            rtol=0.05, atol=0.01)\n    elif jax_dist in [dist.MatrixNormal]:\n        sample_shape = 200000,\n        if len(d_jax.batch_shape) > 0:\n            axes = [(len(sample_shape) + i) for i in range(len(d_jax.\n                batch_shape))]\n            axes = tuple(axes)\n            samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))\n            subshape = samples_re.shape[:len(axes)]\n            ixi = product(*[range(k) for k in subshape])\n            for ix in ixi:\n\n                def get_min_shape(ix, batch_shape):\n                    return min(ix, tuple(map(lambda x: x - 1, batch_shape)))\n                ix_loc = get_min_shape(ix, 
d_jax.loc.shape[:len(ix)])\n jnp.allclose(jnp.mean(samples_re[ix], 0), jnp.squeeze(d_jax\n .mean[ix_loc]), rtol=0.5, atol=0.01)\n samples_mvn = jnp.squeeze(samples_re[ix]).reshape(\n sample_shape + (-1,), order='F')\n ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[:\n len(ix)])\n ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[:len(ix)]\n )\n scale_tril = my_kron(d_jax.scale_tril_column[ix_col], d_jax\n .scale_tril_row[ix_row])\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01\n )\n else:\n jnp.allclose(jnp.mean(samples, 0), jnp.squeeze(d_jax.mean),\n rtol=0.5, atol=0.01)\n samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,),\n order='F')\n scale_tril = my_kron(jnp.squeeze(d_jax.scale_tril_column), jnp.\n squeeze(d_jax.scale_tril_row))\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01)\n else:\n if jnp.all(jnp.isfinite(d_jax.mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,\n atol=0.01)\n if isinstance(d_jax, dist.CAR):\n pytest.skip(\n 'CAR distribution does not have `variance` implemented.')\n if isinstance(d_jax, dist.Gompertz):\n pytest.skip(\n 'Gompertz distribution does not have `variance` implemented.')\n if jnp.all(jnp.isfinite(d_jax.variance)):\n assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),\n rtol=0.05, atol=0.01)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,\n _Gaussian2DMixture, _GeneralMixture, _General2DMixture):\n pytest.skip(f'{jax_dist.__name__} is a function, not a class')\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky\n ) and dist_args[i] != 'concentration':\n continue\n if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':\n continue\n if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i\n ] == 'base_dist':\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':\n continue\n if jax_dist is dist.SineBivariateVonMises and dist_args[i\n ] == 'weighted_correlation':\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n if jax_dist is dist.MultivariateStudentT:\n valid_params[0] += 1\n if jax_dist is dist.LogUniform:\n valid_params[1] += valid_params[0]\n assert jax_dist(*oob_params)\n if not dependent_constraint and (jax_dist is not _ImproperWrapper and \n 'SineSkewed' not in jax_dist.__name__):\n with pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n with pytest.raises(ValueError):\n oob_params = jax.device_get(oob_params)\n\n 
def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n jax.jit(dist_gen_fn)()\n d = jax_dist(*valid_params, validate_args=True)\n if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and\n prepend_shape):\n valid_samples = gen_values_within_bounds(d.support, size=\n prepend_shape + d.batch_shape + d.event_shape)\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,\n rtol=1e-05)\n oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +\n d.batch_shape + d.event_shape)\n with pytest.warns(UserWarning, match='Out-of-support'):\n d.log_prob(oob_samples)\n with pytest.warns(UserWarning, match='Out-of-support'):\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n with pytest.raises(ValueError, match='got invalid'):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n with pytest.warns(UserWarning, match='Out-of-support'):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(\n data).sum()\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data\n ).sum()\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-06)\n assert_allclose(grad_fx, grad_gx, atol=0.0001)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(ValueError,\n match='^BetaProportion distribution got invalid mean parameter\\\\.$'):\n dist.BetaProportion(1.0, 1.0)\n\n\[email protected]('constraint, x, expected', [(constraints.boolean,\n np.array([True, False]), np.array([True, True])), (constraints.boolean,\n np.array([1, 1]), np.array([True, True])), (constraints.boolean, np.\n array([-1, 1]), np.array([False, True])), (constraints.corr_cholesky,\n np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False\n ])), (constraints.corr_cholesky, np.array([[[1, 0], [1, 0]], [[1, 0], [\n 0.5, 0.5]]]), np.array([False, False])), (constraints.corr_matrix, np.\n array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False])),\n (constraints.corr_matrix, np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, \n 0.5]]]), np.array([False, False])), (constraints.greater_than(1), 3, \n True), (constraints.greater_than(1), np.array([-1, 1, 5]), np.array([\n False, False, True])), (constraints.integer_interval(-3, 5), 0, True),\n (constraints.integer_interval(-3, 5), np.array([-5, -3, 0, 1.1, 5, 7]),\n np.array([False, True, True, False, True, False])), (constraints.\n interval(-3, 5), 0, True), (constraints.interval(-3, 5), np.array([-5, \n -3, 0, 5, 7]), np.array([False, True, True, True, False])), (\n constraints.less_than(1), -2, True), (constraints.less_than(1), np.\n array([-1, 1, 5]), np.array([True, False, False])), (constraints.\n lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True), (\n constraints.lower_cholesky, np.array([[[1.0, 0.0], 
[-2.0, -0.1]], [[1.0,\n 0.1], [2.0, 0.2]]]), np.array([False, False])), (constraints.\n nonnegative_integer, 3, True), (constraints.nonnegative_integer, np.\n array([-1.0, 0.0, 5.0]), np.array([False, True, True])), (constraints.\n positive, 3, True), (constraints.positive, np.array([-1, 0, 5]), np.\n array([False, False, True])), (constraints.positive_definite, np.array(\n [[1.0, 0.3], [0.3, 1.0]]), True), (constraints.positive_definite, np.\n array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]), np.array([\n False, False])), (constraints.positive_integer, 3, True), (constraints.\n positive_integer, np.array([-1.0, 0.0, 5.0]), np.array([False, False, \n True])), (constraints.real, -1, True), (constraints.real, np.array([np.\n inf, -np.inf, np.nan, np.pi]), np.array([False, False, False, True])),\n (constraints.simplex, np.array([0.1, 0.3, 0.6]), True), (constraints.\n simplex, np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),\n np.array([True, False, False])), (constraints.softplus_positive, 3, \n True), (constraints.softplus_positive, np.array([-1, 0, 5]), np.array([\n False, False, True])), (constraints.softplus_lower_cholesky, np.array([\n [1.0, 0.0], [-2.0, 0.1]]), True), (constraints.softplus_lower_cholesky,\n np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]), np.\n array([False, False])), (constraints.unit_interval, 0.1, True), (\n constraints.unit_interval, np.array([-5, 0, 0.5, 1, 7]), np.array([\n False, True, True, True, False])), (constraints.sphere, np.array([[1, 0,\n 0], [0.5, 0.5, 0]]), np.array([True, False])), (constraints.\n open_interval(0.0, 1.0), np.array([-5, 0, 0.5, 1, 7]), np.array([False,\n False, True, False, False]))])\ndef test_constraints(constraint, x, expected):\n v = constraint.feasible_like(x)\n if jnp.result_type(v) == 'float32' or jnp.result_type(v) == 'float64':\n assert not constraint.is_discrete\n assert_array_equal(constraint(x), expected)\n feasible_value = constraint.feasible_like(x)\n assert jnp.shape(feasible_value) == jnp.shape(x)\n assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected\n ), True))\n try:\n inverse = biject_to(constraint).inv(feasible_value)\n except NotImplementedError:\n pass\n else:\n assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-07)\n\n\[email protected]('constraint', [constraints.corr_cholesky,\n constraints.corr_matrix, constraints.greater_than(2), constraints.\n interval(-3, 5), constraints.l1_ball, constraints.less_than(1),\n constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector, constraints.positive, constraints.\n positive_definite, constraints.positive_ordered_vector, constraints.\n real, constraints.real_vector, constraints.simplex, constraints.\n softplus_positive, constraints.softplus_lower_cholesky, constraints.\n unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.\n __class__)\[email protected]('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,\n 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if 
len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=\n jnp.bool_))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-05, rtol=1e-05)\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)\n [:, :-1])[1]\n elif constraint in [constraints.real_vector, constraints.\n ordered_vector, constraints.positive_ordered_vector,\n constraints.l1_ball]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x),\n diagonal=-1)\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1\n ) + jnp.identity(matrix.shape[-1])\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n elif constraint in [constraints.lower_cholesky, constraints.\n scaled_unit_lower_cholesky, constraints.positive_definite,\n constraints.softplus_lower_cholesky]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x))\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if constraint is constraints.positive_definite:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(\n jnp.diag(matrix))\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)\n assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)\n\n\[email protected]('transform, event_shape', [(PermuteTransform(np.\n array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (\n SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np\n .array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(\n [biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv]), (5,))])\[email protected]('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1, \n 3), (5, 3)])\ndef test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-06, rtol=0.0001)\n assert transform.inv.inv is transform\n assert transform.inv is 
transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-06)\n assert_allclose(actual, -inv_expected, atol=1e-06)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_composed_transform(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t1])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 2\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2\n ) + jnp.log(2) * 9\n assert_allclose(log_det, expected_log_det)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z\n ) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n assert_allclose(log_det, expected_log_det)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\[email protected]('batch_shape', [(), (5,)])\[email protected]('prepend_event_shape', [(), (4,)])\[email protected]('sample_shape', [(), (7,)])\ndef test_transformed_distribution(batch_shape, prepend_event_shape,\n sample_shape):\n base_dist = dist.Normal(0, 1).expand(batch_shape + prepend_event_shape +\n (6,)).to_event(1 + len(prepend_event_shape))\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n d = dist.TransformedDistribution(base_dist, [t1, t2, t1])\n assert d.event_dim == 2 + len(prepend_event_shape)\n y = d.sample(random.PRNGKey(0), sample_shape)\n t = transforms.ComposeTransform([t1, t2, t1])\n x = t.inv(y)\n assert x.shape == sample_shape + base_dist.shape()\n log_prob = d.log_prob(y)\n assert log_prob.shape == sample_shape + batch_shape\n t_log_det = t.log_abs_det_jacobian(x, y)\n if prepend_event_shape:\n t_log_det = t_log_det.sum(-1)\n expected_log_prob = base_dist.log_prob(x) - t_log_det\n assert_allclose(log_prob, expected_log_prob, atol=1e-05)\n\n\[email protected]('transformed_dist', [dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),\n 
dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms\n .PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(random\n .PRNGKey(1))\n assert_allclose(transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample))\n\n\ndef test_transformed_transformed_distribution():\n loc, scale = -2, 3\n dist1 = dist.TransformedDistribution(dist.Normal(2, 3), transforms.\n PowerTransform(2.0))\n dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(\n -2, 3))\n assert isinstance(dist2.base_dist, dist.Normal)\n assert len(dist2.transforms) == 2\n assert isinstance(dist2.transforms[0], transforms.PowerTransform)\n assert isinstance(dist2.transforms[1], transforms.AffineTransform)\n rng_key = random.PRNGKey(0)\n assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))\n intermediates = dist2.sample_with_intermediates(rng_key)\n assert len(intermediates) == 2\n\n\n<mask token>\n\n\[email protected]('ts', [[transforms.PowerTransform(0.7), transforms\n .AffineTransform(2.0, 3.0)], [transforms.ExpTransform()], [transforms.\n ComposeTransform([transforms.AffineTransform(-2, 3), transforms.\n ExpTransform()]), transforms.PowerTransform(3.0)], [_make_iaf(5,\n hidden_dims=[10], rng_key=random.PRNGKey(0)), transforms.\n PermuteTransform(jnp.arange(5)[::-1]), _make_iaf(5, hidden_dims=[10],\n rng_key=random.PRNGKey(1))]])\ndef test_compose_transform_with_intermediates(ts):\n transform = transforms.ComposeTransform(ts)\n x = random.normal(random.PRNGKey(2), (7, 5))\n y, intermediates = transform.call_with_intermediates(x)\n logdet = transform.log_abs_det_jacobian(x, y, intermediates)\n assert_allclose(y, transform(x))\n assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))\n\n\[email protected]('x_dim, y_dim', [(3, 3), (3, 4)])\ndef test_unpack_transform(x_dim, y_dim):\n xy = np.random.randn(x_dim + y_dim)\n unpack_fn = lambda xy: {'x': xy[:x_dim], 'y': xy[x_dim:]}\n transform = transforms.UnpackTransform(unpack_fn)\n z = transform(xy)\n if x_dim == y_dim:\n with pytest.warns(UserWarning, match='UnpackTransform.inv'):\n t = transform.inv(z)\n else:\n t = transform.inv(z)\n assert_allclose(t, xy)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=\n 100000, key=random.PRNGKey(11)):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. 
we only get\n    Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n    agreement in the empirical distribution of generated samples between our\n    samplers and those from SciPy.\n    \"\"\"\n    if jax_dist not in [dist.Gumbel]:\n        pytest.skip(\n            '{} sampling method taken from upstream, no need to test generated samples.'\n            .format(jax_dist.__name__))\n    jax_dist = jax_dist(*params)\n    if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n        our_samples = jax_dist.sample(key, (N_sample,))\n        ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n        assert ks_result.pvalue > 0.05\n\n\[email protected]('jax_dist, params, support', [(dist.\n    BernoulliLogits, (5.0,), jnp.arange(2)), (dist.BernoulliProbs, (0.5,),\n    jnp.arange(2)), (dist.BinomialLogits, (4.5, 10), jnp.arange(11)), (dist\n    .BinomialProbs, (0.5, 11), jnp.arange(12)), (dist.BetaBinomial, (2.0, \n    0.5, 12), jnp.arange(13)), (dist.CategoricalLogits, (np.array([3.0, 4.0,\n    5.0]),), jnp.arange(3)), (dist.CategoricalProbs, (np.array([0.1, 0.5, \n    0.4]),), jnp.arange(3))])\[email protected]('batch_shape', [(5,), ()])\[email protected]('expand', [False, True])\ndef test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand\n    ):\n    p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))\n    actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)\n    expected = support.reshape((-1,) + (1,) * len(batch_shape))\n    if expand:\n        expected = jnp.broadcast_to(expected, support.shape + batch_shape)\n    assert_allclose(actual, expected)\n\n\ndef test_zero_inflated_enumerate_support():\n    base_dist = dist.Bernoulli(0.5)\n    d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)\n    assert d.has_enumerate_support\n    assert_allclose(d.enumerate_support(), base_dist.enumerate_support())\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)\[email protected]('prepend_shape', [(), (2, 3)])\[email protected]('sample_shape', [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n    jax_dist = jax_dist(*params)\n    new_batch_shape = prepend_shape + jax_dist.batch_shape\n    expanded_dist = jax_dist.expand(new_batch_shape)\n    rng_key = random.PRNGKey(0)\n    samples = expanded_dist.sample(rng_key, sample_shape)\n    assert expanded_dist.batch_shape == new_batch_shape\n    assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n    assert expanded_dist.log_prob(samples\n        ).shape == sample_shape + new_batch_shape\n    assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,\n        ) + new_batch_shape\n    if prepend_shape:\n        with pytest.raises(ValueError, match=\n            'Cannot broadcast distribution of shape'):\n            assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\[email protected]('base_shape', [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1,\n    5)])\[email protected]('event_dim', [0, 1, 2, 3])\[email protected]('sample_shape', [(1000,), (1000, 7, 1), (1000, 1, 7)])\ndef test_expand_shuffle_regression(base_shape, event_dim, sample_shape):\n    expand_shape = 2, 3, 5\n    event_dim = min(event_dim, len(base_shape))\n    loc = random.normal(random.PRNGKey(0), base_shape) * 10\n    base_dist = dist.Normal(loc, 0.1).to_event(event_dim)\n    expanded_dist = base_dist.expand(expand_shape[:len(expand_shape) -\n        event_dim])\n    samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)\n    expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.\n        shape())\n    assert_allclose(samples.mean(0), expected_mean, atol=0.1)\n\n\n<mask token>\n\n\ndef 
test_sine_bivariate_von_mises_sample_mean():\n    loc = jnp.array([[2.0, -1.0], [-2, 1.0]])\n    sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)\n    samples = sine.sample(random.PRNGKey(0), (5000,))\n    assert_allclose(_circ_mean(samples).T, loc, rtol=0.005)\n\n\[email protected]('batch_shape', [(), (4,)])\ndef test_polya_gamma(batch_shape, num_points=20000):\n    d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)\n    rng_key = random.PRNGKey(0)\n    x = jnp.linspace(1e-06, d.truncation_point, num_points)\n    prob = d.truncation_point / num_points * jnp.exp(logsumexp(d.log_prob(x\n        ), axis=-1))\n    assert_allclose(prob, jnp.ones(batch_shape), rtol=0.0001)\n    z = d.sample(rng_key, sample_shape=(3000,))\n    mean = jnp.mean(z, axis=-1)\n    assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)\n\n\[email protected]('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)\n    ), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n    loc = jnp.zeros((1, 6))\n    scale_tril = jnp.eye(6)\n    d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n    full_shape = 4, 1, 1, 1, 6\n    reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n    cut = 4 - extra_event_dims\n    batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n    assert reshaped_dist.batch_shape == batch_shape\n    assert reshaped_dist.event_shape == event_shape\n    large = reshaped_dist.expand(expand_shape)\n    assert large.batch_shape == expand_shape\n    assert large.event_shape == event_shape\n    with pytest.raises((RuntimeError, ValueError)):\n        reshaped_dist.expand(expand_shape + (3,))\n    with pytest.raises((RuntimeError, ValueError)):\n        large.expand(expand_shape[1:])\n\n\[email protected]('batch_shape, mask_shape', [((), ()), ((2,), ()),\n    ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))])\[email protected]('event_shape', [(), (3,)])\ndef test_mask(batch_shape, event_shape, mask_shape):\n    jax_dist = dist.Normal().expand(batch_shape + event_shape).to_event(len\n        (event_shape))\n    mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)\n    if mask_shape == ():\n        mask = bool(mask)\n    samples = jax_dist.sample(random.PRNGKey(1))\n    actual = jax_dist.mask(mask).log_prob(samples)\n    assert_allclose(actual != 0, jnp.broadcast_to(mask, lax.\n        broadcast_shapes(batch_shape, mask_shape)))\n\n\[email protected]('event_shape', [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n\n    def f(x, data):\n        base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n        mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(\n            event_shape))))\n        log_prob = base_dist.mask(mask).log_prob(data)\n        assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)\n            ]\n        return log_prob.sum()\n    data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n    log_prob, grad = jax.value_and_grad(f)(1.0, data)\n    assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\ndef test_dist_pytree(jax_dist, sp_dist, params):\n\n    def f(x):\n        return jax_dist(*params)\n    if jax_dist is _ImproperWrapper:\n        pytest.skip('Cannot flatten ImproperUniform')\n    if jax_dist is dist.EulerMaruyama:\n        pytest.skip(\"EulerMaruyama doesn't define flatten/unflatten\")\n    jax.jit(f)(0)\n    lax.map(f, np.ones(3))\n    expected_dist = f(0)\n    actual_dist = jax.jit(f)(0)\n    expected_sample = expected_dist.sample(random.PRNGKey(0))\n    actual_sample = actual_dist.sample(random.PRNGKey(0))\n    expected_log_prob = 
expected_dist.log_prob(expected_sample)\n actual_log_prob = actual_dist.log_prob(actual_sample)\n assert_allclose(actual_sample, expected_sample, rtol=1e-06)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-06)\n\n\n<mask token>\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\[email protected]('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\[email protected]('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_expanded_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected]('shape', [(), (4,), (2, 3)], ids=str)\[email protected]('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.\n Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.\n Normal), (dist.Weibull, dist.Gamma)])\ndef test_kl_univariate(shape, p_dist, q_dist):\n\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = 
np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f'Missing pattern for param {k}.')\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('shape', [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\n<mask token>\n\n\ndef _allclose_or_equal(a1, a2):\n if isinstance(a1, np.ndarray):\n return np.allclose(a2, a1)\n elif isinstance(a1, jnp.ndarray):\n return jnp.allclose(a2, a1)\n elif isinstance(a1, csr_matrix):\n return np.allclose(a2.todense(), a1.todense())\n else:\n return a2 == a1 or a2 is a1\n\n\ndef _tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[:len(params)]\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n d = make_jax_dist(*params)\n if isinstance(d, _SparseCAR) and d.is_sparse:\n return\n in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for\n i in range(len(params))), 0), *(([(0 if i == idx else None) for i in\n range(len(params))], 0) for idx in vmappable_param_idxs if params[\n idx] is not None), *(([(0 if i == idx else None) for i in range(len\n (params))], vmap_over(d, **{param_names[idx]: 0})) for idx in\n vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==\n idx else None) for i in range(len(params))], vmap_over(d, **{\n param_names[idx]: 1})) for idx in vmappable_param_idxs if \n isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).\n ndim > 0 and jax_dist is not _GeneralMixture)]\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),\n arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,\n in_axes)]\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes\n )(*batched_params)\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d)\n assert eq == jnp.array([True])\n samples_dist = sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n 
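# note: under jax.jit the summed count below is an abstract tracer, so sampling must rely on the static total_count_max bound passed to Multinomial\n    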
probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10\n ).sample(key)\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-06)\n\n\ndef test_normal_log_cdf():\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)\n\n\[email protected]('value', [-15.0, jnp.array([[-15.0], [-10.0], [-\n 5.0]]), jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]])]\n )\ndef test_truncated_normal_log_prob_in_tail(value):\n loc = 1.35\n scale = jnp.geomspace(0.01, 1, 10)\n low, high = -20, -1.0\n a, b = (low - loc) / scale, (high - loc) / scale\n numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high\n ).log_prob(value)\n jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)\n assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)\n\n\ndef test_sample_truncated_normal_in_tail():\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))\n assert ~jnp.isinf(samples).any()\n\n\[email protected]_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-4": "<mask token>\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\ndef _circ_mean(angles):\n return jnp.arctan2(jnp.mean(jnp.sin(angles), axis=0), jnp.mean(jnp.cos(\n angles), axis=0))\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\n<mask token>\n\n\nclass T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):\n\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\ndef _mvn_to_scipy(loc, cov, prec, tril):\n jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < '1.6.0':\n pytest.skip(\n 'Multivariate Student-T distribution is not available in scipy < 1.6'\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\n<mask token>\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\ndef _TruncatedCauchy(loc, scale, low, high):\n return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)\n\n\n<mask token>\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,\n math.pi])\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0]), np.array([1.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n<mask token>\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(self:\n SineSkewedVonMisesBatched, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return 
self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc,\n scale=scale)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix\n =covariance_matrix)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, scales):\n component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,\n scale_ in zip(locs, scales)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in\n self.component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [dist.MultivariateNormal(loc=loc_,\n covariance_matrix=covariance_matrix) for loc_,\n covariance_matrix in zip(locs, covariance_matrices)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [vmap_over(d, loc=locs) for d in self.\n component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass 
ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {'rate': constraints.positive, 'gate_logits':\n constraints.real}\n pytree_data_fields = 'rate',\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), gate_logits, validate_args=\n validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_zero_inflated_poisson_logits(self: ZeroInflatedPoissonLogits,\n rate=None, gate_logits=None):\n dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(self,\n base_dist=vmap_over(self.base_dist, rate=rate), gate_logits=\n gate_logits, gate=gate_logits)\n dist_axes.rate = rate\n return dist_axes\n\n\nclass SparsePoisson(dist.Poisson):\n\n def __init__(self, rate, *, validate_args=None):\n super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=\n vmap_over(self.base_dist, loc=loc, scale=scale))\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = ['loc', 'correlation', 'conditional_precision']\n\n def __init__(self, loc, correlation, conditional_precision, adj_matrix,\n *, is_sparse=True, validate_args=None):\n super().__init__(loc, correlation, conditional_precision,\n adj_matrix, is_sparse=True, validate_args=validate_args)\n\n\n<mask token>\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\n<mask token>\n\n\ndef _is_batched_multivariate(jax_dist):\n return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\ndef gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):\n eps = 1e-06\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size)\n elif isinstance(constraint, constraints.greater_than):\n return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.randint(key, size, lower_bound, upper_bound + 1)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound + random.poisson(key, np.array(5),\n shape=size)\n elif isinstance(constraint, constraints.interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=lower_bound, maxval=upper_bound\n )\n elif constraint in (constraints.real, constraints.real_vector):\n return random.normal(key, size)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.\n upper_bound, shape=size[:-1])\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (\n size[-1] * 
(size[-1] - 1) // 2,), minval=-1, maxval=1))\n elif constraint is constraints.corr_matrix:\n cholesky = signed_stick_breaking_tril(random.uniform(key, size[:-2] +\n (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return jnp.tril(random.uniform(key, size))\n elif constraint is constraints.positive_definite:\n x = random.normal(key, size)\n return jnp.matmul(x, jnp.swapaxes(x, -2, -1))\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x - random.normal(key, size[:-1] + (1,))\n elif isinstance(constraint, constraints.independent):\n return gen_values_within_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n return x / jnp.linalg.norm(x, axis=-1)\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [0, (-1) ** sign * 0.5]\n return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError('{} not implemented.'.format(constraint))\n\n\ndef gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size) - 2\n elif isinstance(constraint, constraints.greater_than):\n return constraint.lower_bound - jnp.exp(random.normal(key, size))\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n return random.randint(key, size, lower_bound - 1, lower_bound)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound - random.poisson(key, np.array(5),\n shape=size)\n elif isinstance(constraint, constraints.interval):\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=upper_bound, maxval=\n upper_bound + 1.0)\n elif constraint in [constraints.real, constraints.real_vector]:\n return lax.full(size, np.nan)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]\n ) + 0.01\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.\n upper_bound, shape=size[:-1]) + 1\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (\n size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)) + 0.01\n elif constraint is constraints.corr_matrix:\n cholesky = 0.01 + signed_stick_breaking_tril(random.uniform(key, \n size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)\n )\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return random.uniform(key, size)\n elif constraint is constraints.positive_definite:\n return random.normal(key, size)\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x[..., ::-1]\n elif isinstance(constraint, constraints.independent):\n return gen_values_outside_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)\n return 2 * x\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]\n 
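# note: each coordinate then falls in (1.1, 2) or (-2, -1.1), so the L1 norm exceeds 1 and the draw lies outside the unit L1 ball\n        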
return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError('{} not implemented.'.format(constraint))\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.\n event_shape)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = 1 if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\n \"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.\n MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.\n covariance_matrix), rtol=1e-06)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, 'shape', ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = jax_dist(*params)\n try:\n expected_batch_shape, expected_event_shape = type(jax_dist\n ).infer_shapes(*shapes)\n except NotImplementedError:\n pytest.skip(\n f'{type(jax_dist).__name__}.infer_shapes() is not implemented')\n assert jax_dist.batch_shape == expected_batch_shape\n assert jax_dist.event_shape == expected_event_shape\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_has_rsample(jax_dist, sp_dist, params):\n jax_dist = jax_dist(*params)\n masked_dist = jax_dist.mask(False)\n indept_dist = jax_dist.expand_by([2]).to_event(1)\n transf_dist = dist.TransformedDistribution(jax_dist, biject_to(\n constraints.real))\n assert masked_dist.has_rsample == jax_dist.has_rsample\n assert indept_dist.has_rsample == jax_dist.has_rsample\n assert transf_dist.has_rsample == jax_dist.has_rsample\n if jax_dist.has_rsample:\n assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n if isinstance(jax_dist, dist.TransformedDistribution):\n assert jax_dist.base_dist.has_rsample\n else:\n assert set(jax_dist.arg_constraints) == set(jax_dist.\n reparametrized_params)\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.Normal):\n masked_dist.rsample(random.PRNGKey(0))\n indept_dist.rsample(random.PRNGKey(0))\n transf_dist.rsample(random.PRNGKey(0))\n else:\n with pytest.raises(NotImplementedError):\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.BernoulliProbs):\n with pytest.raises(NotImplementedError):\n masked_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n indept_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n 
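# note: the transformed wrapper mirrors has_rsample of its Bernoulli base, so it must raise as well\n                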
transf_dist.rsample(random.PRNGKey(0))\n\n\[email protected]('batch_shape', [(), (4,), (3, 2)])\ndef test_unit(batch_shape):\n log_factor = random.normal(random.PRNGKey(0), batch_shape)\n d = dist.Unit(log_factor=log_factor)\n x = d.sample(random.PRNGKey(1))\n assert x.shape == batch_shape + (0,)\n assert (d.log_prob(x) == log_factor).all()\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [\n 'concentration1', 'concentration0'], 'BetaProportion': ['mean',\n 'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],\n 'InverseGamma': ['concentration'], 'LKJ': ['concentration'],\n 'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.\n __name__, [])\n dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1\n :] if inspect.isclass(jax_dist) else inspect.getfullargspec(\n jax_dist)[0])]\n params_dict = dict(zip(dist_args[:len(params)], params))\n jax_class = type(jax_dist(**params_dict))\n reparametrized_params = [p for p in jax_class.reparametrized_params if \n p not in gamma_derived_params]\n if not reparametrized_params:\n pytest.skip('{} not reparametrized.'.format(jax_class.__name__))\n nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in\n reparametrized_params}\n repara_params = tuple(v for k, v in params_dict.items() if k in\n reparametrized_params)\n rng_key = random.PRNGKey(0)\n\n def fn(args):\n args_dict = dict(zip(reparametrized_params, args))\n return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).\n sample(key=rng_key))\n actual_grad = jax.grad(fn)(repara_params)\n assert len(actual_grad) == len(repara_params)\n eps = 0.001\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(\n repara_params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(\n repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,\n atol=0.03)\n\n\[email protected]('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.\n Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,\n (0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,\n (5.0, 2.0, 4.0))])\ndef test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n return (z + z ** 2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean ** 2\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\[email protected]('jit', [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples\n ).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (dist.LeftTruncatedDistribution, dist.\n RightTruncatedDistribution, 
dist.TwoSidedTruncatedDistribution)\n    if sp_dist is None:\n        if isinstance(jax_dist, truncated_dists):\n            if isinstance(params[0], dist.Distribution):\n                loc, scale, low, high = params[0].loc, params[0].scale, params[\n                    1], params[2]\n            else:\n                loc, scale, low, high = params\n            if low is None:\n                low = -np.inf\n            if high is None:\n                high = np.inf\n            sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n            expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -\n                sp_dist.cdf(low))\n            assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,\n                atol=1e-05)\n            return\n        pytest.skip('no corresponding scipy distn.')\n    if _is_batched_multivariate(jax_dist):\n        pytest.skip('batching not allowed in multivariate distns.')\n    if jax_dist.event_shape and prepend_shape:\n        pytest.skip(\n            'batched samples cannot be scored by multivariate distributions.')\n    sp_dist = sp_dist(*params)\n    try:\n        expected = sp_dist.logpdf(samples)\n    except AttributeError:\n        expected = sp_dist.logpmf(samples)\n    except ValueError as e:\n        if "The input vector 'x' must lie within the normal simplex." in str(e\n            ):\n            samples = jax.device_get(samples).astype('float64')\n            samples = samples / samples.sum(axis=-1, keepdims=True)\n            expected = sp_dist.logpdf(samples)\n        else:\n            raise e\n    assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)\n\n\ndef test_mixture_log_prob():\n    gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist\n        .Normal(0, 1).expand([2]))\n    actual = gmm.log_prob(0.0)\n    expected = dist.Normal(0, 1).log_prob(0.0)\n    assert_allclose(actual, expected)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.\n    Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])\n@pytest.mark.filterwarnings('ignore:overflow encountered:RuntimeWarning')\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n    d = jax_dist(*params)\n    if d.event_dim > 0:\n        pytest.skip(\n            'skip testing cdf/icdf methods of multivariate distributions')\n    samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n    quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n    try:\n        rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05\n        if d.shape() == () and not d.is_discrete:\n            assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.\n                log_prob(samples)), atol=1e-05, rtol=rtol)\n            assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(\n                -d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)\n        assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,\n            rtol=1e-05)\n        assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)\n    except NotImplementedError:\n        pass\n    if not sp_dist:\n        pytest.skip('no corresponding scipy distn.')\n    sp_dist = sp_dist(*params)\n    try:\n        actual_cdf = d.cdf(samples)\n        expected_cdf = sp_dist.cdf(samples)\n        assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)\n        actual_icdf = d.icdf(quantiles)\n        expected_icdf = sp_dist.ppf(quantiles)\n        assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)\n    except NotImplementedError:\n        pass\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DIRECTIONAL)\ndef test_gof(jax_dist, sp_dist, params):\n    if 'Improper' in jax_dist.__name__:\n        pytest.skip('distribution has improper .log_prob()')\n    if 'LKJ' in jax_dist.__name__:\n        pytest.xfail('incorrect submanifold scaling')\n    if jax_dist is dist.EulerMaruyama:\n        d = jax_dist(*params)\n        if d.event_dim > 1:\n            pytest.skip(\n                'skip EulerMaruyama test when event shape is non-trivial.')\n    num_samples = 10000\n    if 
'BetaProportion' in jax_dist.__name__:\n num_samples = 20000\n rng_key = random.PRNGKey(0)\n d = jax_dist(*params)\n samples = d.sample(key=rng_key, sample_shape=(num_samples,))\n probs = np.exp(d.log_prob(samples))\n dim = None\n if jax_dist is dist.ProjectedNormal:\n dim = samples.shape[-1] - 1\n probs = probs.reshape(num_samples, -1)\n samples = samples.reshape(probs.shape + d.event_shape)\n if 'Dirichlet' in jax_dist.__name__:\n samples = samples[..., :-1]\n for b in range(probs.shape[1]):\n try:\n gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)\n except InvalidTest:\n pytest.skip('expensive test')\n else:\n assert gof > TEST_FAILURE_RATE\n\n\n<mask token>\n\n\ndef _tril_cholesky_to_tril_corr(x):\n w = vec_to_tril_matrix(x, diagonal=-1)\n diag = jnp.sqrt(1 - jnp.sum(w ** 2, axis=-1))\n cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])\n corr = jnp.matmul(cholesky, cholesky.T)\n return matrix_to_tril_vec(corr, diagonal=-1)\n\n\[email protected]('dimension', [2, 3, 5])\ndef test_log_prob_LKJCholesky_uniform(dimension):\n d = dist.LKJCholesky(dimension=dimension, concentration=1)\n N = 5\n corr_log_prob = []\n for i in range(N):\n sample = d.sample(random.PRNGKey(i))\n log_prob = d.log_prob(sample)\n sample_tril = matrix_to_tril_vec(sample, diagonal=-1)\n cholesky_to_corr_jac = np.linalg.slogdet(jax.jacobian(\n _tril_cholesky_to_tril_corr)(sample_tril))[1]\n corr_log_prob.append(log_prob - cholesky_to_corr_jac)\n corr_log_prob = np.array(corr_log_prob)\n assert_allclose(corr_log_prob, jnp.broadcast_to(corr_log_prob[0],\n corr_log_prob.shape), rtol=1e-06)\n if dimension == 2:\n assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-06)\n\n\[email protected]('dimension', [2, 3, 5])\[email protected]('concentration', [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(\n partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = biject_to(constraints.corr_cholesky\n ).log_abs_det_jacobian(unconstrained, sample)\n signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = (beta_log_prob - affine_logdet -\n signed_stick_breaking_logdet)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06\n )\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(0, 20, (1000, 100))\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n<mask token>\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n concentration0 = 
np.exp(np.random.normal(size=shape))\n    concentration1 = np.exp(np.random.normal(size=shape))\n    value = jnp.arange(1 + total_count)\n    num_samples = 100000\n    probs = np.random.beta(concentration1, concentration0, size=(\n        num_samples,) + shape)\n    log_probs = dist.Binomial(total_count, probs).log_prob(value)\n    expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n    actual = dist.BetaBinomial(concentration1, concentration0, total_count\n        ).log_prob(value)\n    assert_allclose(actual, expected, rtol=0.02)\n\n\n@pytest.mark.parametrize('total_count', [1, 2, 3, 10])\n@pytest.mark.parametrize('batch_shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n    event_shape = 3,\n    concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n    value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1\n        ,) * len(batch_shape) + event_shape)\n    num_samples = 100000\n    probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (\n        num_samples, 1))\n    log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n    expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n    actual = dist.DirichletMultinomial(concentration, total_count).log_prob(\n        value)\n    assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n    gamma_conc = np.exp(np.random.normal(size=shape))\n    gamma_rate = np.exp(np.random.normal(size=shape))\n    value = jnp.arange(15)\n    num_samples = 300000\n    poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(\n        num_samples,) + shape)\n    log_probs = dist.Poisson(poisson_rate).log_prob(value)\n    expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n    actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n    assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n    if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n        pytest.skip('we have separated tests for LKJCholesky distribution')\n    if jax_dist is _ImproperWrapper:\n        pytest.skip(\n            'no param for ImproperUniform to test for log_prob gradient')\n    rng_key = random.PRNGKey(0)\n    value = jax_dist(*params).sample(rng_key)\n\n    def fn(*args):\n        return jnp.sum(jax_dist(*args).log_prob(value))\n    eps = 0.001\n    for i in range(len(params)):\n        if jax_dist is dist.EulerMaruyama and i == 1:\n            continue\n        if jax_dist is _SparseCAR and i == 3:\n            continue\n        if isinstance(params[i], dist.Distribution):\n            continue\n        if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,\n            jnp.int64):\n            continue\n        actual_grad = jax.grad(fn, i)(*params)\n        args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]\n        args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]\n        fn_lhs = fn(*args_lhs)\n        fn_rhs = fn(*args_rhs)\n        expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n        assert jnp.shape(actual_grad) == jnp.shape(params[i])\n        if i == 0 and jax_dist is dist.Delta:\n            expected_grad = 0.0\n        assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,\n            atol=0.01)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\ndef test_mean_var(jax_dist, sp_dist, params):\n    if jax_dist is _ImproperWrapper:\n        pytest.skip('Improper distribution does not have mean/var implemented')\n    if jax_dist is FoldedNormal:\n        pytest.skip('Folded distribution does not have mean/var implemented')\n    if jax_dist is dist.EulerMaruyama:\n        pytest.skip(\n            
'EulerMaruyama distribution does not have mean/var implemented')\n    if jax_dist is dist.RelaxedBernoulliLogits:\n        pytest.skip(\n            'RelaxedBernoulli distribution does not have mean/var implemented')\n    if 'SineSkewed' in jax_dist.__name__:\n        pytest.skip('Skewed distributions are not symmetric about the location.')\n    if jax_dist in (_TruncatedNormal, _TruncatedCauchy, dist.\n        LeftTruncatedDistribution, dist.RightTruncatedDistribution, dist.\n        TwoSidedTruncatedDistribution):\n        pytest.skip('Truncated distributions do not have mean/var implemented')\n    if jax_dist is dist.ProjectedNormal:\n        pytest.skip('Mean is defined in submanifold')\n    n = 20000 if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.\n        SineBivariateVonMises] else 200000\n    d_jax = jax_dist(*params)\n    k = random.PRNGKey(0)\n    samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)\n    if sp_dist and not _is_batched_multivariate(d_jax) and jax_dist not in [\n        dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]:\n        d_sp = sp_dist(*params)\n        try:\n            sp_mean = d_sp.mean()\n        except TypeError:\n            sp_mean = d_sp.mean\n        if d_jax.event_shape:\n            try:\n                sp_var = jnp.diag(d_sp.cov())\n            except TypeError:\n                sp_var = jnp.diag(d_sp.cov)\n            except AttributeError:\n                sp_var = d_sp.var()\n        else:\n            sp_var = d_sp.var()\n        assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-07)\n        assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-07)\n        if jnp.all(jnp.isfinite(sp_mean)):\n            assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,\n                atol=0.01)\n        if jnp.all(jnp.isfinite(sp_var)):\n            assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),\n                rtol=0.05, atol=0.01)\n    elif jax_dist in [dist.LKJ, dist.LKJCholesky]:\n        if jax_dist is dist.LKJCholesky:\n            corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))\n        else:\n            corr_samples = samples\n        dimension, concentration, _ = params\n        marginal = dist.Beta(concentration + 0.5 * (dimension - 2), \n            concentration + 0.5 * (dimension - 2))\n        marginal_mean = 2 * marginal.mean - 1\n        marginal_std = 2 * jnp.sqrt(marginal.variance)\n        expected_mean = jnp.broadcast_to(jnp.reshape(marginal_mean, jnp.\n            shape(marginal_mean) + (1, 1)), jnp.shape(marginal_mean) +\n            d_jax.event_shape)\n        expected_std = jnp.broadcast_to(jnp.reshape(marginal_std, jnp.shape\n            (marginal_std) + (1, 1)), jnp.shape(marginal_std) + d_jax.\n            event_shape)\n        expected_mean = expected_mean * (1 - jnp.identity(dimension)\n            ) + jnp.identity(dimension)\n        expected_std = expected_std * (1 - jnp.identity(dimension))\n        assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol\n            =0.01)\n        assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)\n    elif jax_dist in [dist.VonMises]:\n        assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=0.01)\n        x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)\n        expected_variance = 1 - jnp.sqrt(x ** 2 + y ** 2)\n        assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=0.01\n            )\n    elif jax_dist in [dist.SineBivariateVonMises]:\n        phi_loc = _circ_mean(samples[..., 0])\n        psi_loc = _circ_mean(samples[..., 1])\n        assert_allclose(d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1),\n            rtol=0.05, atol=0.01)\n    elif jax_dist in [dist.MatrixNormal]:\n        sample_shape = 200000,\n        if len(d_jax.batch_shape) > 0:\n            axes = [(len(sample_shape) + i) for i in range(len(d_jax.\n                batch_shape))]\n            axes = tuple(axes)\n            samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))\n            subshape = samples_re.shape[:len(axes)]\n            ixi = product(*[range(k) for k in subshape])\n            for ix in 
ixi:\n\n def get_min_shape(ix, batch_shape):\n return min(ix, tuple(map(lambda x: x - 1, batch_shape)))\n ix_loc = get_min_shape(ix, d_jax.loc.shape[:len(ix)])\n jnp.allclose(jnp.mean(samples_re[ix], 0), jnp.squeeze(d_jax\n .mean[ix_loc]), rtol=0.5, atol=0.01)\n samples_mvn = jnp.squeeze(samples_re[ix]).reshape(\n sample_shape + (-1,), order='F')\n ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[:\n len(ix)])\n ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[:len(ix)]\n )\n scale_tril = my_kron(d_jax.scale_tril_column[ix_col], d_jax\n .scale_tril_row[ix_row])\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01\n )\n else:\n jnp.allclose(jnp.mean(samples, 0), jnp.squeeze(d_jax.mean),\n rtol=0.5, atol=0.01)\n samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,),\n order='F')\n scale_tril = my_kron(jnp.squeeze(d_jax.scale_tril_column), jnp.\n squeeze(d_jax.scale_tril_row))\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01)\n else:\n if jnp.all(jnp.isfinite(d_jax.mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,\n atol=0.01)\n if isinstance(d_jax, dist.CAR):\n pytest.skip(\n 'CAR distribution does not have `variance` implemented.')\n if isinstance(d_jax, dist.Gompertz):\n pytest.skip(\n 'Gompertz distribution does not have `variance` implemented.')\n if jnp.all(jnp.isfinite(d_jax.variance)):\n assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),\n rtol=0.05, atol=0.01)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,\n _Gaussian2DMixture, _GeneralMixture, _General2DMixture):\n pytest.skip(f'{jax_dist.__name__} is a function, not a class')\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky\n ) and dist_args[i] != 'concentration':\n continue\n if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':\n continue\n if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i\n ] == 'base_dist':\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':\n continue\n if jax_dist is dist.SineBivariateVonMises and dist_args[i\n ] == 'weighted_correlation':\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n if jax_dist is dist.MultivariateStudentT:\n valid_params[0] += 1\n if jax_dist is dist.LogUniform:\n valid_params[1] += valid_params[0]\n assert jax_dist(*oob_params)\n if not dependent_constraint and (jax_dist is not _ImproperWrapper and \n 'SineSkewed' not in jax_dist.__name__):\n with 
pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n with pytest.raises(ValueError):\n oob_params = jax.device_get(oob_params)\n\n def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n jax.jit(dist_gen_fn)()\n d = jax_dist(*valid_params, validate_args=True)\n if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and\n prepend_shape):\n valid_samples = gen_values_within_bounds(d.support, size=\n prepend_shape + d.batch_shape + d.event_shape)\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,\n rtol=1e-05)\n oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +\n d.batch_shape + d.event_shape)\n with pytest.warns(UserWarning, match='Out-of-support'):\n d.log_prob(oob_samples)\n with pytest.warns(UserWarning, match='Out-of-support'):\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n with pytest.raises(ValueError, match='got invalid'):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n with pytest.warns(UserWarning, match='Out-of-support'):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(\n data).sum()\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data\n ).sum()\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-06)\n assert_allclose(grad_fx, grad_gx, atol=0.0001)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(ValueError,\n match='^BetaProportion distribution got invalid mean parameter\\\\.$'):\n dist.BetaProportion(1.0, 1.0)\n\n\[email protected]('constraint, x, expected', [(constraints.boolean,\n np.array([True, False]), np.array([True, True])), (constraints.boolean,\n np.array([1, 1]), np.array([True, True])), (constraints.boolean, np.\n array([-1, 1]), np.array([False, True])), (constraints.corr_cholesky,\n np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False\n ])), (constraints.corr_cholesky, np.array([[[1, 0], [1, 0]], [[1, 0], [\n 0.5, 0.5]]]), np.array([False, False])), (constraints.corr_matrix, np.\n array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False])),\n (constraints.corr_matrix, np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, \n 0.5]]]), np.array([False, False])), (constraints.greater_than(1), 3, \n True), (constraints.greater_than(1), np.array([-1, 1, 5]), np.array([\n False, False, True])), (constraints.integer_interval(-3, 5), 0, True),\n (constraints.integer_interval(-3, 5), np.array([-5, -3, 0, 1.1, 5, 7]),\n np.array([False, True, True, False, True, False])), (constraints.\n interval(-3, 5), 0, True), (constraints.interval(-3, 5), np.array([-5, \n -3, 0, 5, 7]), np.array([False, True, True, True, False])), (\n constraints.less_than(1), -2, True), (constraints.less_than(1), np.\n array([-1, 1, 5]), np.array([True, 
False, False])), (constraints.\n lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True), (\n constraints.lower_cholesky, np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0,\n 0.1], [2.0, 0.2]]]), np.array([False, False])), (constraints.\n nonnegative_integer, 3, True), (constraints.nonnegative_integer, np.\n array([-1.0, 0.0, 5.0]), np.array([False, True, True])), (constraints.\n positive, 3, True), (constraints.positive, np.array([-1, 0, 5]), np.\n array([False, False, True])), (constraints.positive_definite, np.array(\n [[1.0, 0.3], [0.3, 1.0]]), True), (constraints.positive_definite, np.\n array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]), np.array([\n False, False])), (constraints.positive_integer, 3, True), (constraints.\n positive_integer, np.array([-1.0, 0.0, 5.0]), np.array([False, False, \n True])), (constraints.real, -1, True), (constraints.real, np.array([np.\n inf, -np.inf, np.nan, np.pi]), np.array([False, False, False, True])),\n (constraints.simplex, np.array([0.1, 0.3, 0.6]), True), (constraints.\n simplex, np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),\n np.array([True, False, False])), (constraints.softplus_positive, 3, \n True), (constraints.softplus_positive, np.array([-1, 0, 5]), np.array([\n False, False, True])), (constraints.softplus_lower_cholesky, np.array([\n [1.0, 0.0], [-2.0, 0.1]]), True), (constraints.softplus_lower_cholesky,\n np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]), np.\n array([False, False])), (constraints.unit_interval, 0.1, True), (\n constraints.unit_interval, np.array([-5, 0, 0.5, 1, 7]), np.array([\n False, True, True, True, False])), (constraints.sphere, np.array([[1, 0,\n 0], [0.5, 0.5, 0]]), np.array([True, False])), (constraints.\n open_interval(0.0, 1.0), np.array([-5, 0, 0.5, 1, 7]), np.array([False,\n False, True, False, False]))])\ndef test_constraints(constraint, x, expected):\n v = constraint.feasible_like(x)\n if jnp.result_type(v) == 'float32' or jnp.result_type(v) == 'float64':\n assert not constraint.is_discrete\n assert_array_equal(constraint(x), expected)\n feasible_value = constraint.feasible_like(x)\n assert jnp.shape(feasible_value) == jnp.shape(x)\n assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected\n ), True))\n try:\n inverse = biject_to(constraint).inv(feasible_value)\n except NotImplementedError:\n pass\n else:\n assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-07)\n\n\[email protected]('constraint', [constraints.corr_cholesky,\n constraints.corr_matrix, constraints.greater_than(2), constraints.\n interval(-3, 5), constraints.l1_ball, constraints.less_than(1),\n constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector, constraints.positive, constraints.\n positive_definite, constraints.positive_ordered_vector, constraints.\n real, constraints.real_vector, constraints.simplex, constraints.\n softplus_positive, constraints.softplus_lower_cholesky, constraints.\n unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.\n __class__)\[email protected]('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,\n 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == 
constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=\n jnp.bool_))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-05, rtol=1e-05)\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)\n [:, :-1])[1]\n elif constraint in [constraints.real_vector, constraints.\n ordered_vector, constraints.positive_ordered_vector,\n constraints.l1_ball]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x),\n diagonal=-1)\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1\n ) + jnp.identity(matrix.shape[-1])\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n elif constraint in [constraints.lower_cholesky, constraints.\n scaled_unit_lower_cholesky, constraints.positive_definite,\n constraints.softplus_lower_cholesky]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x))\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if constraint is constraints.positive_definite:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(\n jnp.diag(matrix))\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)\n assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)\n\n\[email protected]('transform, event_shape', [(PermuteTransform(np.\n array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (\n SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np\n .array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(\n [biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv]), (5,))])\[email protected]('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1, \n 3), (5, 3)])\ndef test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n z 
= transform.inv(y)\n assert_allclose(x, z, atol=1e-06, rtol=0.0001)\n assert transform.inv.inv is transform\n assert transform.inv is transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-06)\n assert_allclose(actual, -inv_expected, atol=1e-06)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_composed_transform(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t1])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 2\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2\n ) + jnp.log(2) * 9\n assert_allclose(log_det, expected_log_det)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z\n ) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n assert_allclose(log_det, expected_log_det)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\[email protected]('batch_shape', [(), (5,)])\[email protected]('prepend_event_shape', [(), (4,)])\[email protected]('sample_shape', [(), (7,)])\ndef test_transformed_distribution(batch_shape, prepend_event_shape,\n sample_shape):\n base_dist = dist.Normal(0, 1).expand(batch_shape + prepend_event_shape +\n (6,)).to_event(1 + len(prepend_event_shape))\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n d = dist.TransformedDistribution(base_dist, [t1, t2, t1])\n assert d.event_dim == 2 + len(prepend_event_shape)\n y = d.sample(random.PRNGKey(0), sample_shape)\n t = transforms.ComposeTransform([t1, t2, t1])\n x = t.inv(y)\n assert x.shape == sample_shape + base_dist.shape()\n log_prob = d.log_prob(y)\n assert log_prob.shape == sample_shape + batch_shape\n t_log_det = t.log_abs_det_jacobian(x, y)\n if prepend_event_shape:\n t_log_det = t_log_det.sum(-1)\n expected_log_prob = base_dist.log_prob(x) - t_log_det\n assert_allclose(log_prob, expected_log_prob, atol=1e-05)\n\n\[email protected]('transformed_dist', 
[dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),\n dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms\n .PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(random\n .PRNGKey(1))\n assert_allclose(transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample))\n\n\ndef test_transformed_transformed_distribution():\n loc, scale = -2, 3\n dist1 = dist.TransformedDistribution(dist.Normal(2, 3), transforms.\n PowerTransform(2.0))\n dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(\n -2, 3))\n assert isinstance(dist2.base_dist, dist.Normal)\n assert len(dist2.transforms) == 2\n assert isinstance(dist2.transforms[0], transforms.PowerTransform)\n assert isinstance(dist2.transforms[1], transforms.AffineTransform)\n rng_key = random.PRNGKey(0)\n assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))\n intermediates = dist2.sample_with_intermediates(rng_key)\n assert len(intermediates) == 2\n\n\ndef _make_iaf(input_dim, hidden_dims, rng_key):\n arn_init, arn = AutoregressiveNN(input_dim, hidden_dims, param_dims=[1, 1])\n _, init_params = arn_init(rng_key, (input_dim,))\n return InverseAutoregressiveTransform(partial(arn, init_params))\n\n\[email protected]('ts', [[transforms.PowerTransform(0.7), transforms\n .AffineTransform(2.0, 3.0)], [transforms.ExpTransform()], [transforms.\n ComposeTransform([transforms.AffineTransform(-2, 3), transforms.\n ExpTransform()]), transforms.PowerTransform(3.0)], [_make_iaf(5,\n hidden_dims=[10], rng_key=random.PRNGKey(0)), transforms.\n PermuteTransform(jnp.arange(5)[::-1]), _make_iaf(5, hidden_dims=[10],\n rng_key=random.PRNGKey(1))]])\ndef test_compose_transform_with_intermediates(ts):\n transform = transforms.ComposeTransform(ts)\n x = random.normal(random.PRNGKey(2), (7, 5))\n y, intermediates = transform.call_with_intermediates(x)\n logdet = transform.log_abs_det_jacobian(x, y, intermediates)\n assert_allclose(y, transform(x))\n assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))\n\n\[email protected]('x_dim, y_dim', [(3, 3), (3, 4)])\ndef test_unpack_transform(x_dim, y_dim):\n xy = np.random.randn(x_dim + y_dim)\n unpack_fn = lambda xy: {'x': xy[:x_dim], 'y': xy[x_dim:]}\n transform = transforms.UnpackTransform(unpack_fn)\n z = transform(xy)\n if x_dim == y_dim:\n with pytest.warns(UserWarning, match='UnpackTransform.inv'):\n t = transform.inv(z)\n else:\n t = transform.inv(z)\n assert_allclose(t, xy)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=\n 100000, key=random.PRNGKey(11)):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. 
we only get\n    Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n    agreement in the empirical distribution of generated samples between our\n    samplers and those from SciPy.\n    \"\"\"\n    if jax_dist not in [dist.Gumbel]:\n        pytest.skip(\n            '{} sampling method taken from upstream, no need to test generated samples.'\n            .format(jax_dist.__name__))\n    jax_dist = jax_dist(*params)\n    if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n        our_samples = jax_dist.sample(key, (N_sample,))\n        ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n        assert ks_result.pvalue > 0.05\n\n\n@pytest.mark.parametrize('jax_dist, params, support', [(dist.\n    BernoulliLogits, (5.0,), jnp.arange(2)), (dist.BernoulliProbs, (0.5,),\n    jnp.arange(2)), (dist.BinomialLogits, (4.5, 10), jnp.arange(11)), (dist\n    .BinomialProbs, (0.5, 11), jnp.arange(12)), (dist.BetaBinomial, (2.0, \n    0.5, 12), jnp.arange(13)), (dist.CategoricalLogits, (np.array([3.0, 4.0,\n    5.0]),), jnp.arange(3)), (dist.CategoricalProbs, (np.array([0.1, 0.5, \n    0.4]),), jnp.arange(3))])\n@pytest.mark.parametrize('batch_shape', [(5,), ()])\n@pytest.mark.parametrize('expand', [False, True])\ndef test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand\n    ):\n    p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))\n    actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)\n    expected = support.reshape((-1,) + (1,) * len(batch_shape))\n    if expand:\n        expected = jnp.broadcast_to(expected, support.shape + batch_shape)\n    assert_allclose(actual, expected)\n\n\ndef test_zero_inflated_enumerate_support():\n    base_dist = dist.Bernoulli(0.5)\n    d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)\n    assert d.has_enumerate_support\n    assert_allclose(d.enumerate_support(), base_dist.enumerate_support())\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)\n@pytest.mark.parametrize('prepend_shape', [(), (2, 3)])\n@pytest.mark.parametrize('sample_shape', [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n    jax_dist = jax_dist(*params)\n    new_batch_shape = prepend_shape + jax_dist.batch_shape\n    expanded_dist = jax_dist.expand(new_batch_shape)\n    rng_key = random.PRNGKey(0)\n    samples = expanded_dist.sample(rng_key, sample_shape)\n    assert expanded_dist.batch_shape == new_batch_shape\n    assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n    assert expanded_dist.log_prob(samples\n        ).shape == sample_shape + new_batch_shape\n    assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,\n        ) + new_batch_shape\n    if prepend_shape:\n        with pytest.raises(ValueError, match=\n            'Cannot broadcast distribution of shape'):\n            assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\n@pytest.mark.parametrize('base_shape', [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1,\n    5)])\n@pytest.mark.parametrize('event_dim', [0, 1, 2, 3])\n@pytest.mark.parametrize('sample_shape', [(1000,), (1000, 7, 1), (1000, 1, 7)])\ndef test_expand_shuffle_regression(base_shape, event_dim, sample_shape):\n    expand_shape = 2, 3, 5\n    event_dim = min(event_dim, len(base_shape))\n    loc = random.normal(random.PRNGKey(0), base_shape) * 10\n    base_dist = dist.Normal(loc, 0.1).to_event(event_dim)\n    expanded_dist = base_dist.expand(expand_shape[:len(expand_shape) -\n        event_dim])\n    samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)\n    expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.\n        shape())\n    assert_allclose(samples.mean(0), expected_mean, atol=0.1)\n\n\n<mask token>\n\n\ndef 
test_sine_bivariate_von_mises_sample_mean():\n    loc = jnp.array([[2.0, -1.0], [-2, 1.0]])\n    sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)\n    samples = sine.sample(random.PRNGKey(0), (5000,))\n    assert_allclose(_circ_mean(samples).T, loc, rtol=0.005)\n\n\n@pytest.mark.parametrize('batch_shape', [(), (4,)])\ndef test_polya_gamma(batch_shape, num_points=20000):\n    d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)\n    rng_key = random.PRNGKey(0)\n    x = jnp.linspace(1e-06, d.truncation_point, num_points)\n    prob = d.truncation_point / num_points * jnp.exp(logsumexp(d.log_prob(x\n        ), axis=-1))\n    assert_allclose(prob, jnp.ones(batch_shape), rtol=0.0001)\n    z = d.sample(rng_key, sample_shape=(3000,))\n    mean = jnp.mean(z, axis=-1)\n    assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)\n\n\n@pytest.mark.parametrize('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)\n    ), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n    loc = jnp.zeros((1, 6))\n    scale_tril = jnp.eye(6)\n    d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n    full_shape = 4, 1, 1, 1, 6\n    reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n    cut = 4 - extra_event_dims\n    batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n    assert reshaped_dist.batch_shape == batch_shape\n    assert reshaped_dist.event_shape == event_shape\n    large = reshaped_dist.expand(expand_shape)\n    assert large.batch_shape == expand_shape\n    assert large.event_shape == event_shape\n    with pytest.raises((RuntimeError, ValueError)):\n        reshaped_dist.expand(expand_shape + (3,))\n    with pytest.raises((RuntimeError, ValueError)):\n        large.expand(expand_shape[1:])\n\n\n@pytest.mark.parametrize('batch_shape, mask_shape', [((), ()), ((2,), ()),\n    ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))])\n@pytest.mark.parametrize('event_shape', [(), (3,)])\ndef test_mask(batch_shape, event_shape, mask_shape):\n    jax_dist = dist.Normal().expand(batch_shape + event_shape).to_event(len\n        (event_shape))\n    mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)\n    if mask_shape == ():\n        mask = bool(mask)\n    samples = jax_dist.sample(random.PRNGKey(1))\n    actual = jax_dist.mask(mask).log_prob(samples)\n    assert_allclose(actual != 0, jnp.broadcast_to(mask, lax.\n        broadcast_shapes(batch_shape, mask_shape)))\n\n\n@pytest.mark.parametrize('event_shape', [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n\n    def f(x, data):\n        base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n        mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(\n            event_shape))))\n        log_prob = base_dist.mask(mask).log_prob(data)\n        assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)\n            ]\n        return log_prob.sum()\n    data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n    log_prob, grad = jax.value_and_grad(f)(1.0, data)\n    assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\n@pytest.mark.parametrize('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n    DIRECTIONAL)\ndef test_dist_pytree(jax_dist, sp_dist, params):\n\n    def f(x):\n        return jax_dist(*params)\n    if jax_dist is _ImproperWrapper:\n        pytest.skip('Cannot flatten ImproperUniform')\n    if jax_dist is dist.EulerMaruyama:\n        pytest.skip(\"EulerMaruyama doesn't define flatten/unflatten\")\n    jax.jit(f)(0)\n    lax.map(f, np.ones(3))\n    expected_dist = f(0)\n    actual_dist = jax.jit(f)(0)\n    expected_sample = expected_dist.sample(random.PRNGKey(0))\n    actual_sample = actual_dist.sample(random.PRNGKey(0))\n    expected_log_prob = 
expected_dist.log_prob(expected_sample)\n actual_log_prob = actual_dist.log_prob(actual_sample)\n assert_allclose(actual_sample, expected_sample, rtol=1e-06)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-06)\n\n\n<mask token>\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\[email protected]('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\[email protected]('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_expanded_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected]('shape', [(), (4,), (2, 3)], ids=str)\[email protected]('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.\n Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.\n Normal), (dist.Weibull, dist.Gamma)])\ndef test_kl_univariate(shape, p_dist, q_dist):\n\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = 
np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f'Missing pattern for param {k}.')\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('shape', [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\n<mask token>\n\n\ndef _allclose_or_equal(a1, a2):\n if isinstance(a1, np.ndarray):\n return np.allclose(a2, a1)\n elif isinstance(a1, jnp.ndarray):\n return jnp.allclose(a2, a1)\n elif isinstance(a1, csr_matrix):\n return np.allclose(a2.todense(), a1.todense())\n else:\n return a2 == a1 or a2 is a1\n\n\ndef _tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[:len(params)]\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n d = make_jax_dist(*params)\n if isinstance(d, _SparseCAR) and d.is_sparse:\n return\n in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for\n i in range(len(params))), 0), *(([(0 if i == idx else None) for i in\n range(len(params))], 0) for idx in vmappable_param_idxs if params[\n idx] is not None), *(([(0 if i == idx else None) for i in range(len\n (params))], vmap_over(d, **{param_names[idx]: 0})) for idx in\n vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==\n idx else None) for i in range(len(params))], vmap_over(d, **{\n param_names[idx]: 1})) for idx in vmappable_param_idxs if \n isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).\n ndim > 0 and jax_dist is not _GeneralMixture)]\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),\n arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,\n in_axes)]\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes\n )(*batched_params)\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d)\n assert eq == jnp.array([True])\n samples_dist = sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n 
probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10\n ).sample(key)\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-06)\n\n\ndef test_normal_log_cdf():\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)\n\n\[email protected]('value', [-15.0, jnp.array([[-15.0], [-10.0], [-\n 5.0]]), jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]])]\n )\ndef test_truncated_normal_log_prob_in_tail(value):\n loc = 1.35\n scale = jnp.geomspace(0.01, 1, 10)\n low, high = -20, -1.0\n a, b = (low - loc) / scale, (high - loc) / scale\n numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high\n ).log_prob(value)\n jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)\n assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)\n\n\ndef test_sample_truncated_normal_in_tail():\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))\n assert ~jnp.isinf(samples).any()\n\n\[email protected]_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-5": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import namedtuple\nfrom functools import partial\nimport inspect\nfrom itertools import product\nimport math\nimport os\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_array_equal\nimport pytest\nimport scipy\nfrom scipy.sparse import csr_matrix\nimport scipy.stats as osp\n\nimport jax\nfrom jax import grad, lax, vmap\nimport jax.numpy as jnp\nimport jax.random as random\nfrom jax.scipy.special import expit, logsumexp\nfrom jax.scipy.stats import norm as jax_norm, truncnorm as jax_truncnorm\n\nimport numpyro.distributions as dist\nfrom numpyro.distributions import (\n SineBivariateVonMises,\n constraints,\n kl_divergence,\n transforms,\n)\nfrom numpyro.distributions.batch_util import vmap_over\nfrom numpyro.distributions.discrete import _to_probs_bernoulli, _to_probs_multinom\nfrom numpyro.distributions.flows import InverseAutoregressiveTransform\nfrom numpyro.distributions.gof import InvalidTest, auto_goodness_of_fit\nfrom numpyro.distributions.transforms import (\n LowerCholeskyAffine,\n PermuteTransform,\n PowerTransform,\n SimplexToOrderedTransform,\n SoftplusTransform,\n biject_to,\n)\nfrom numpyro.distributions.util import (\n matrix_to_tril_vec,\n multinomial,\n signed_stick_breaking_tril,\n sum_rightmost,\n vec_to_tril_matrix,\n)\nfrom numpyro.nn import AutoregressiveNN\n\nTEST_FAILURE_RATE = 2e-5 # For all goodness-of-fit tests.\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = (*ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1])\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\ndef _circ_mean(angles):\n return jnp.arctan2(\n jnp.mean(jnp.sin(angles), axis=0), jnp.mean(jnp.cos(angles), axis=0)\n )\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\ndef sde_fn2(xy, _):\n tau, a = 2.0, 1.1\n x, y = xy[0], xy[1]\n dx = tau * (x - x**3.0 / 3.0 + y)\n dy = (1.0 / tau) * (a - x)\n dxy = jnp.vstack([dx, dy]).reshape(xy.shape)\n\n sigma2 = 0.1\n return dxy, sigma2\n\n\nclass T(namedtuple(\"TestCase\", [\"jax_dist\", \"sp_dist\", \"params\"])):\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\ndef _mvn_to_scipy(loc, cov, prec, tril):\n jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < \"1.6.0\":\n pytest.skip(\n \"Multivariate Student-T distribution is not available in scipy < 1.6\"\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\ndef _truncnorm_to_scipy(loc, scale, low, high):\n if low is None:\n a = -np.inf\n else:\n a = (low - loc) / scale\n if high is None:\n b = np.inf\n else:\n b = (high - loc) / scale\n return osp.truncnorm(a, b, loc=loc, scale=scale)\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\ndef _TruncatedCauchy(loc, scale, low, 
high):\n return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)\n\n\n_TruncatedNormal.arg_constraints = {}\n_TruncatedNormal.reparametrized_params = []\n_TruncatedNormal.infer_shapes = lambda *args: (lax.broadcast_shapes(*args), ())\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n def __init__(self, skewness, **kwargs):\n lower, upper = (np.array([-math.pi, -math.pi]), np.array([math.pi, math.pi]))\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = (np.array([0.0]), np.array([1.0]))\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises(self: SineSkewedVonMises, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = (np.array([0.0, -1.234]), np.array([1.0, 10.0]))\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(\n self: SineSkewedVonMisesBatched, skewness=None\n):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(\n mixing_distribution=mixing_distribution,\n component_distribution=component_dist,\n )\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n component_distribution = vmap_over(\n self.component_distribution, loc=loc, scale=scale\n )\n return vmap_over.dispatch(dist.MixtureSameFamily)(\n self, _component_distribution=component_distribution\n )\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(\n loc=loc, covariance_matrix=covariance_matrix\n )\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(\n mixing_distribution=mixing_distribution,\n component_distribution=component_dist,\n )\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(\n self, _component_distribution=component_distribution\n )\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, 
mixing_probs, locs, scales):\n component_dists = [\n dist.Normal(loc=loc_, scale=scale_) for loc_, scale_ in zip(locs, scales)\n ]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(\n mixing_distribution=mixing_distribution,\n component_distributions=component_dists,\n )\n\n @property\n def locs(self):\n # hotfix for vmapping tests, which cannot easily check non-array attributes\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [\n vmap_over(d, loc=locs, scale=scales) for d in self.component_distributions\n ]\n return vmap_over.dispatch(dist.MixtureGeneral)(\n self, _component_distributions=component_distributions\n )\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [\n dist.MultivariateNormal(loc=loc_, covariance_matrix=covariance_matrix)\n for loc_, covariance_matrix in zip(locs, covariance_matrices)\n ]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(\n mixing_distribution=mixing_distribution,\n component_distributions=component_dists,\n )\n\n @property\n def locs(self):\n # hotfix for vmapping tests, which cannot easily check non-array attributes\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [\n vmap_over(d, loc=locs) for d in self.component_distributions\n ]\n return vmap_over.dispatch(dist.MixtureGeneral)(\n self, _component_distributions=component_distributions\n )\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {\"rate\": constraints.positive, \"gate_logits\": constraints.real}\n pytree_data_fields = (\"rate\",)\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), gate_logits, validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_zero_inflated_poisson_logits(\n self: ZeroInflatedPoissonLogits, rate=None, gate_logits=None\n):\n dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(\n self,\n base_dist=vmap_over(self.base_dist, rate=rate),\n gate_logits=gate_logits,\n gate=gate_logits,\n )\n dist_axes.rate = rate\n return dist_axes\n\n\nclass SparsePoisson(dist.Poisson):\n def __init__(self, rate, *, validate_args=None):\n super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {\"loc\": constraints.real, \"scale\": constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), 
validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: \"FoldedNormal\", loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(\n self, base_dist=vmap_over(self.base_dist, loc=loc, scale=scale)\n )\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = [\"loc\", \"correlation\", \"conditional_precision\"]\n\n def __init__(\n self,\n loc,\n correlation,\n conditional_precision,\n adj_matrix,\n *,\n is_sparse=True,\n validate_args=None,\n ):\n super().__init__(\n loc,\n correlation,\n conditional_precision,\n adj_matrix,\n is_sparse=True,\n validate_args=validate_args,\n )\n\n\n_DIST_MAP = {\n dist.AsymmetricLaplace: lambda loc, scale, asymmetry: osp.laplace_asymmetric(\n asymmetry, loc=loc, scale=scale\n ),\n dist.BernoulliProbs: lambda probs: osp.bernoulli(p=probs),\n dist.BernoulliLogits: lambda logits: osp.bernoulli(p=_to_probs_bernoulli(logits)),\n dist.Beta: lambda con1, con0: osp.beta(con1, con0),\n dist.BetaProportion: lambda mu, kappa: osp.beta(mu * kappa, (1 - mu) * kappa),\n dist.BinomialProbs: lambda probs, total_count: osp.binom(n=total_count, p=probs),\n dist.BinomialLogits: lambda logits, total_count: osp.binom(\n n=total_count, p=_to_probs_bernoulli(logits)\n ),\n dist.Cauchy: lambda loc, scale: osp.cauchy(loc=loc, scale=scale),\n dist.Chi2: lambda df: osp.chi2(df),\n dist.Dirichlet: lambda conc: osp.dirichlet(conc),\n dist.Exponential: lambda rate: osp.expon(scale=jnp.reciprocal(rate)),\n dist.Gamma: lambda conc, rate: osp.gamma(conc, scale=1.0 / rate),\n dist.GeometricProbs: lambda probs: osp.geom(p=probs, loc=-1),\n dist.GeometricLogits: lambda logits: osp.geom(\n p=_to_probs_bernoulli(logits), loc=-1\n ),\n dist.Gumbel: lambda loc, scale: osp.gumbel_r(loc=loc, scale=scale),\n dist.HalfCauchy: lambda scale: osp.halfcauchy(scale=scale),\n dist.HalfNormal: lambda scale: osp.halfnorm(scale=scale),\n dist.InverseGamma: lambda conc, rate: osp.invgamma(conc, scale=rate),\n dist.Laplace: lambda loc, scale: osp.laplace(loc=loc, scale=scale),\n dist.LogNormal: lambda loc, scale: osp.lognorm(s=scale, scale=jnp.exp(loc)),\n dist.LogUniform: lambda a, b: osp.loguniform(a, b),\n dist.MultinomialProbs: lambda probs, total_count: osp.multinomial(\n n=total_count, p=probs\n ),\n dist.MultinomialLogits: lambda logits, total_count: osp.multinomial(\n n=total_count, p=_to_probs_multinom(logits)\n ),\n dist.MultivariateNormal: _mvn_to_scipy,\n dist.MultivariateStudentT: _multivariate_t_to_scipy,\n dist.LowRankMultivariateNormal: _lowrank_mvn_to_scipy,\n dist.Normal: lambda loc, scale: osp.norm(loc=loc, scale=scale),\n dist.Pareto: lambda scale, alpha: osp.pareto(alpha, scale=scale),\n dist.Poisson: lambda rate: osp.poisson(rate),\n dist.StudentT: lambda df, loc, scale: osp.t(df=df, loc=loc, scale=scale),\n dist.Uniform: lambda a, b: osp.uniform(a, b - a),\n dist.Logistic: lambda loc, scale: osp.logistic(loc=loc, scale=scale),\n dist.VonMises: lambda loc, conc: osp.vonmises(\n loc=np.array(loc, dtype=np.float64), kappa=np.array(conc, dtype=np.float64)\n ),\n dist.Weibull: lambda scale, conc: osp.weibull_min(\n c=conc,\n scale=scale,\n ),\n _TruncatedNormal: _truncnorm_to_scipy,\n}\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\nCONTINUOUS = [\n T(dist.AsymmetricLaplace, 1.0, 0.5, 1.0),\n T(dist.AsymmetricLaplace, np.array([1.0, 2.0]), 2.0, 2.0),\n 
T(dist.AsymmetricLaplace, np.array([[1.0], [2.0]]), 2.0, np.array([3.0, 5.0])),\n T(dist.AsymmetricLaplaceQuantile, 0.0, 1.0, 0.5),\n T(dist.AsymmetricLaplaceQuantile, np.array([1.0, 2.0]), 2.0, 0.7),\n T(\n dist.AsymmetricLaplaceQuantile,\n np.array([[1.0], [2.0]]),\n 2.0,\n np.array([0.2, 0.8]),\n ),\n T(dist.Beta, 0.2, 1.1),\n T(dist.Beta, 1.0, np.array([2.0, 2.0])),\n T(dist.Beta, 1.0, np.array([[1.0, 1.0], [2.0, 2.0]])),\n T(dist.BetaProportion, 0.2, 10.0),\n T(dist.BetaProportion, 0.51, np.array([2.0, 1.0])),\n T(dist.BetaProportion, 0.5, np.array([[4.0, 4.0], [2.0, 2.0]])),\n T(dist.Chi2, 2.0),\n T(dist.Chi2, np.array([0.3, 1.3])),\n T(dist.Cauchy, 0.0, 1.0),\n T(dist.Cauchy, 0.0, np.array([1.0, 2.0])),\n T(dist.Cauchy, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),\n T(dist.Dirichlet, np.array([1.7])),\n T(dist.Dirichlet, np.array([0.2, 1.1])),\n T(dist.Dirichlet, np.array([[0.2, 1.1], [2.0, 2.0]])),\n T(\n dist.EulerMaruyama,\n np.array([0.0, 0.1, 0.2]),\n sde_fn1,\n dist.Normal(0.1, 1.0),\n ),\n T(\n dist.EulerMaruyama,\n np.array([0.0, 0.1, 0.2]),\n sde_fn2,\n dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),\n ),\n T(\n dist.EulerMaruyama,\n np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),\n sde_fn2,\n dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),\n ),\n T(\n dist.EulerMaruyama,\n np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),\n sde_fn2,\n dist.Normal(jnp.array([[0.0, 1.0], [2.0, 3.0]]), 1e-2).to_event(1),\n ),\n T(dist.Exponential, 2.0),\n T(dist.Exponential, np.array([4.0, 2.0])),\n T(dist.Gamma, np.array([1.7]), np.array([[2.0], [3.0]])),\n T(dist.Gamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),\n T(dist.GaussianRandomWalk, 0.1, 10),\n T(dist.GaussianRandomWalk, np.array([0.1, 0.3, 0.25]), 10),\n T(\n dist.GaussianCopulaBeta,\n np.array([7.0, 2.0]),\n np.array([4.0, 10.0]),\n np.array([[1.0, 0.75], [0.75, 1.0]]),\n ),\n T(dist.GaussianCopulaBeta, 2.0, 1.5, np.eye(3)),\n T(dist.GaussianCopulaBeta, 2.0, 1.5, np.full((5, 3, 3), np.eye(3))),\n T(dist.Gompertz, np.array([1.7]), np.array([[2.0], [3.0]])),\n T(dist.Gompertz, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),\n T(dist.Gumbel, 0.0, 1.0),\n T(dist.Gumbel, 0.5, 2.0),\n T(dist.Gumbel, np.array([0.0, 0.5]), np.array([1.0, 2.0])),\n T(FoldedNormal, 2.0, 4.0),\n T(FoldedNormal, np.array([2.0, 50.0]), np.array([4.0, 100.0])),\n T(dist.HalfCauchy, 1.0),\n T(dist.HalfCauchy, np.array([1.0, 2.0])),\n T(dist.HalfNormal, 1.0),\n T(dist.HalfNormal, np.array([1.0, 2.0])),\n T(_ImproperWrapper, constraints.positive, (), (3,)),\n T(dist.InverseGamma, np.array([1.7]), np.array([[2.0], [3.0]])),\n T(dist.InverseGamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),\n T(dist.Kumaraswamy, 10.0, np.array([2.0, 3.0])),\n T(dist.Kumaraswamy, np.array([1.7]), np.array([[2.0], [3.0]])),\n T(dist.Kumaraswamy, 0.6, 0.5),\n T(dist.Laplace, 0.0, 1.0),\n T(dist.Laplace, 0.5, np.array([1.0, 2.5])),\n T(dist.Laplace, np.array([1.0, -0.5]), np.array([2.3, 3.0])),\n T(dist.LKJ, 2, 0.5, \"onion\"),\n T(dist.LKJ, 5, np.array([0.5, 1.0, 2.0]), \"cvine\"),\n T(dist.LKJCholesky, 2, 0.5, \"onion\"),\n T(dist.LKJCholesky, 2, 0.5, \"cvine\"),\n T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), \"onion\"),\n pytest.param(\n *T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), \"cvine\"),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for CI\"),\n ),\n pytest.param(\n *T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), \"onion\"),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for 
CI\"),\n ),\n T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), \"cvine\"),\n T(dist.Logistic, 0.0, 1.0),\n T(dist.Logistic, 1.0, np.array([1.0, 2.0])),\n T(dist.Logistic, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),\n T(dist.LogNormal, 1.0, 0.2),\n T(dist.LogNormal, -1.0, np.array([0.5, 1.3])),\n T(dist.LogNormal, np.array([0.5, -0.7]), np.array([[0.1, 0.4], [0.5, 0.1]])),\n T(dist.LogUniform, 1.0, 2.0),\n T(dist.LogUniform, 1.0, np.array([2.0, 3.0])),\n T(dist.LogUniform, np.array([1.0, 2.0]), np.array([[3.0], [4.0]])),\n T(\n dist.MatrixNormal,\n 1.0 * np.arange(6).reshape(3, 2),\n np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]),\n np.array([[1.0, 0], [0.4, 1]]),\n ),\n T(\n dist.MatrixNormal,\n 1.0 * np.arange(12).reshape((2, 3, 2)),\n np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]) * np.ones((2, 3, 3)),\n np.array([[1.0, 0], [0.4, 0.5]]) * np.ones((2, 2, 2)),\n ),\n T(\n dist.MatrixNormal,\n 1.0 * np.arange(36).reshape((2, 3, 3, 2)),\n np.identity(3),\n np.identity(2),\n ),\n T(dist.MultivariateNormal, 0.0, np.array([[1.0, 0.5], [0.5, 1.0]]), None, None),\n T(\n dist.MultivariateNormal,\n np.array([1.0, 3.0]),\n None,\n np.array([[1.0, 0.5], [0.5, 1.0]]),\n None,\n ),\n T(\n dist.MultivariateNormal,\n np.array([1.0, 3.0]),\n None,\n np.array([[[1.0, 0.5], [0.5, 1.0]]]),\n None,\n ),\n T(\n dist.MultivariateNormal,\n np.array([2.0]),\n None,\n None,\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateNormal,\n np.arange(6, dtype=np.float32).reshape((3, 2)),\n None,\n None,\n np.array([[1.0, 0.0], [0.0, 1.0]]),\n ),\n T(\n dist.MultivariateNormal,\n 0.0,\n None,\n np.broadcast_to(np.identity(3), (2, 3, 3)),\n None,\n ),\n T(\n dist.CAR,\n 1.2,\n np.array([-0.2, 0.3]),\n 0.1,\n np.array(\n [\n [0.0, 1.0, 1.0, 0.0],\n [1.0, 0.0, 0.0, 1.0],\n [1.0, 0.0, 0.0, 1.0],\n [0.0, 1.0, 1.0, 0.0],\n ]\n ),\n ),\n T(\n dist.CAR,\n np.array([0.0, 1.0, 3.0, 4.0]),\n 0.1,\n np.array([0.3, 0.7]),\n np.array(\n [\n [0.0, 1.0, 1.0, 0.0],\n [1.0, 0.0, 0.0, 1.0],\n [1.0, 0.0, 0.0, 1.0],\n [0.0, 1.0, 1.0, 0.0],\n ]\n ),\n ),\n T(\n _SparseCAR,\n np.array([[0.0, 1.0, 3.0, 4.0], [2.0, -1.0, -3.0, 2.0]]),\n 0.0,\n 0.1,\n np.array(\n [\n [0.0, 1.0, 1.0, 0.0],\n [1.0, 0.0, 0.0, 1.0],\n [1.0, 0.0, 0.0, 1.0],\n [0.0, 1.0, 1.0, 0.0],\n ]\n ),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n 0.0,\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.array([1.0, 3.0]),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.array([1.0, 3.0]),\n np.array([[[1.0, 0.0], [0.5, 1.0]]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.array([3.0]),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.arange(6, dtype=np.float32).reshape((3, 2)),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.ones(3),\n np.broadcast_to(np.identity(3), (2, 3, 3)),\n ),\n T(\n dist.MultivariateStudentT,\n np.array(7.0),\n np.array([1.0, 3.0]),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n np.arange(20, 22, dtype=jnp.float32),\n np.ones(3),\n np.broadcast_to(jnp.identity(3), (2, 3, 3)),\n ),\n T(\n dist.MultivariateStudentT,\n np.arange(20, 26, dtype=jnp.float32).reshape((3, 2)),\n np.ones(2),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.LowRankMultivariateNormal,\n np.zeros(2),\n np.array([[1.0], [0.0]]),\n np.array([1.0, 1.0]),\n ),\n T(\n dist.LowRankMultivariateNormal,\n np.arange(6, 
dtype=jnp.float32).reshape((2, 3)),\n np.arange(6, dtype=jnp.float32).reshape((3, 2)),\n np.array([1.0, 2.0, 3.0]),\n ),\n T(dist.Normal, 0.0, 1.0),\n T(dist.Normal, 1.0, np.array([1.0, 2.0])),\n T(dist.Normal, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),\n T(dist.Pareto, 1.0, 2.0),\n T(dist.Pareto, np.array([1.0, 0.5]), np.array([0.3, 2.0])),\n T(dist.Pareto, np.array([[1.0], [3.0]]), np.array([1.0, 0.5])),\n T(dist.RelaxedBernoulliLogits, 2.0, -10.0),\n T(dist.RelaxedBernoulliLogits, np.array([1.0, 3.0]), np.array([3.0, 8.0])),\n T(dist.SoftLaplace, 1.0, 1.0),\n T(dist.SoftLaplace, np.array([-1.0, 50.0]), np.array([4.0, 100.0])),\n T(dist.StudentT, 1.0, 1.0, 0.5),\n T(dist.StudentT, 2.0, np.array([1.0, 2.0]), 2.0),\n T(dist.StudentT, np.array([3.0, 5.0]), np.array([[1.0], [2.0]]), 2.0),\n T(_TruncatedCauchy, 0.0, 1.0, -1.0, None),\n T(_TruncatedCauchy, 0.0, np.array([1.0, 2.0]), 1.0, None),\n T(\n _TruncatedCauchy,\n np.array([0.0, 1.0]),\n np.array([[1.0], [2.0]]),\n np.array([-2.0, 2.0]),\n None,\n ),\n T(_TruncatedCauchy, 0.0, 1.0, None, 1.0),\n T(_TruncatedCauchy, 0.0, 1.0, -1.0, 1.0),\n T(_TruncatedNormal, 0.0, 1.0, -1.0, None),\n T(_TruncatedNormal, -1.0, np.array([1.0, 2.0]), 1.0, None),\n T(\n _TruncatedNormal,\n np.array([0.0, 1.0]),\n np.array([[1.0], [2.0]]),\n np.array([-2.0, 2.0]),\n None,\n ),\n T(_TruncatedNormal, -1.0, 2.0, 1.0, 5.0),\n T(_TruncatedNormal, np.array([-1.0, 4.0]), 2.0, None, 5.0),\n T(_TruncatedNormal, -1.0, np.array([2.0, 3.0]), 1.0, None),\n T(_TruncatedNormal, -1.0, 2.0, np.array([-6.0, 4.0]), np.array([-4.0, 6.0])),\n T(\n _TruncatedNormal,\n np.array([0.0, 1.0]),\n np.array([[1.0], [2.0]]),\n None,\n np.array([-2.0, 2.0]),\n ),\n T(dist.TwoSidedTruncatedDistribution, dist.Laplace(0.0, 1.0), -2.0, 3.0),\n T(dist.Uniform, 0.0, 2.0),\n T(dist.Uniform, 1.0, np.array([2.0, 3.0])),\n T(dist.Uniform, np.array([0.0, 0.0]), np.array([[2.0], [3.0]])),\n T(dist.Weibull, 0.2, 1.1),\n T(dist.Weibull, 2.8, np.array([2.0, 2.0])),\n T(dist.Weibull, 1.8, np.array([[1.0, 1.0], [2.0, 2.0]])),\n T(\n _GaussianMixture,\n np.ones(3) / 3.0,\n np.array([0.0, 7.7, 2.1]),\n np.array([4.2, 7.7, 2.1]),\n ),\n T(\n _Gaussian2DMixture,\n np.array([0.2, 0.5, 0.3]),\n np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]), # Mean\n np.array(\n [\n [\n [0.1, -0.2],\n [-0.2, 1.0],\n ],\n [\n [0.75, 0.0],\n [0.0, 0.75],\n ],\n [\n [1.0, 0.5],\n [0.5, 0.27],\n ],\n ]\n ), # Covariance\n ),\n T(\n _GeneralMixture,\n np.array([0.2, 0.3, 0.5]),\n np.array([0.0, 7.7, 2.1]),\n np.array([4.2, 1.7, 2.1]),\n ),\n T(\n _General2DMixture,\n np.array([0.2, 0.5, 0.3]),\n np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]), # Mean\n np.array(\n [\n [\n [0.1, -0.2],\n [-0.2, 1.0],\n ],\n [\n [0.75, 0.0],\n [0.0, 0.75],\n ],\n [\n [1.0, 0.5],\n [0.5, 0.27],\n ],\n ]\n ), # Covariance\n ),\n]\n\nDIRECTIONAL = [\n T(dist.VonMises, 2.0, 10.0),\n T(dist.VonMises, 2.0, np.array([150.0, 10.0])),\n T(dist.VonMises, np.array([1 / 3 * np.pi, -1.0]), np.array([20.0, 30.0])),\n pytest.param(\n *T(\n dist.SineBivariateVonMises,\n 0.0,\n 0.0,\n 5.0,\n 6.0,\n 2.0,\n ),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for CI\"),\n ),\n T(\n dist.SineBivariateVonMises,\n 3.003,\n -1.343,\n 5.0,\n 6.0,\n 2.0,\n ),\n pytest.param(\n *T(\n dist.SineBivariateVonMises,\n -1.232,\n -1.3430,\n 3.4,\n 2.0,\n 1.0,\n ),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for CI\"),\n ),\n pytest.param(\n *T(\n dist.SineBivariateVonMises,\n np.array([math.pi - 0.2, 1.0]),\n np.array([0.0, 1.0]),\n 
np.array([5.0, 5.0]),\n np.array([7.0, 0.5]),\n None,\n np.array([0.5, 0.1]),\n ),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for CI\"),\n ),\n T(dist.ProjectedNormal, np.array([0.0, 0.0])),\n T(dist.ProjectedNormal, np.array([[2.0, 3.0]])),\n T(dist.ProjectedNormal, np.array([0.0, 0.0, 0.0])),\n T(dist.ProjectedNormal, np.array([[-1.0, 2.0, 3.0]])),\n T(SineSkewedUniform, np.array([-math.pi / 4, 0.1])),\n T(SineSkewedVonMises, np.array([0.342355])),\n T(SineSkewedVonMisesBatched, np.array([[0.342355, -0.0001], [0.91, 0.09]])),\n]\n\nDISCRETE = [\n T(dist.BetaBinomial, 2.0, 5.0, 10),\n T(\n dist.BetaBinomial,\n np.array([2.0, 4.0]),\n np.array([5.0, 3.0]),\n np.array([10, 12]),\n ),\n T(dist.BernoulliProbs, 0.2),\n T(dist.BernoulliProbs, np.array([0.2, 0.7])),\n T(dist.BernoulliLogits, np.array([-1.0, 3.0])),\n T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([10, 2])),\n T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([5, 8])),\n T(dist.BinomialLogits, np.array([-1.0, 3.0]), np.array([5, 8])),\n T(dist.CategoricalProbs, np.array([1.0])),\n T(dist.CategoricalProbs, np.array([0.1, 0.5, 0.4])),\n T(dist.CategoricalProbs, np.array([[0.1, 0.5, 0.4], [0.4, 0.4, 0.2]])),\n T(dist.CategoricalLogits, np.array([-5.0])),\n T(dist.CategoricalLogits, np.array([1.0, 2.0, -2.0])),\n T(dist.CategoricalLogits, np.array([[-1, 2.0, 3.0], [3.0, -4.0, -2.0]])),\n T(dist.Delta, 1),\n T(dist.Delta, np.array([0.0, 2.0])),\n T(dist.Delta, np.array([0.0, 2.0]), np.array([-2.0, -4.0])),\n T(dist.DirichletMultinomial, np.array([1.0, 2.0, 3.9]), 10),\n T(dist.DirichletMultinomial, np.array([0.2, 0.7, 1.1]), np.array([5, 5])),\n T(dist.GammaPoisson, 2.0, 2.0),\n T(dist.GammaPoisson, np.array([6.0, 2]), np.array([2.0, 8.0])),\n T(dist.GeometricProbs, 0.2),\n T(dist.GeometricProbs, np.array([0.2, 0.7])),\n T(dist.GeometricLogits, np.array([-1.0, 3.0])),\n T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), 10),\n T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), np.array([5, 8])),\n T(dist.MultinomialLogits, np.array([-1.0, 3.0]), np.array([[5], [8]])),\n T(dist.NegativeBinomialProbs, 10, 0.2),\n T(dist.NegativeBinomialProbs, 10, np.array([0.2, 0.6])),\n T(dist.NegativeBinomialProbs, np.array([4.2, 10.7, 2.1]), 0.2),\n T(\n dist.NegativeBinomialProbs,\n np.array([4.2, 10.7, 2.1]),\n np.array([0.2, 0.6, 0.5]),\n ),\n T(dist.NegativeBinomialLogits, 10, -2.1),\n T(dist.NegativeBinomialLogits, 10, np.array([-5.2, 2.1])),\n T(dist.NegativeBinomialLogits, np.array([4.2, 10.7, 2.1]), -5.2),\n T(\n dist.NegativeBinomialLogits,\n np.array([4.2, 7.7, 2.1]),\n np.array([4.2, 0.7, 2.1]),\n ),\n T(dist.NegativeBinomial2, 0.3, 10),\n T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), 10),\n T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), np.array([10.2, 20.7, 2.1])),\n T(dist.OrderedLogistic, -2, np.array([-10.0, 4.0, 9.0])),\n T(dist.OrderedLogistic, np.array([-4, 3, 4, 5]), np.array([-1.5])),\n T(dist.DiscreteUniform, -2, np.array([-1.0, 4.0, 9.0])),\n T(dist.DiscreteUniform, np.array([-4, 3, 4, 5]), np.array([6])),\n T(dist.Poisson, 2.0),\n T(dist.Poisson, np.array([2.0, 3.0, 5.0])),\n T(SparsePoisson, 2.0),\n T(SparsePoisson, np.array([2.0, 3.0, 5.0])),\n T(SparsePoisson, 2),\n T(dist.ZeroInflatedPoisson, 0.6, 2.0),\n T(dist.ZeroInflatedPoisson, np.array([0.2, 0.7, 0.3]), np.array([2.0, 3.0, 5.0])),\n T(ZeroInflatedPoissonLogits, 2.0, 3.0),\n T(\n ZeroInflatedPoissonLogits,\n np.array([0.2, 4.0, 0.3]),\n np.array([2.0, -3.0, 5.0]),\n ),\n]\n\n\ndef _is_batched_multivariate(jax_dist):\n 
return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\ndef gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):\n eps = 1e-6\n\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size)\n elif isinstance(constraint, constraints.greater_than):\n return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.randint(key, size, lower_bound, upper_bound + 1)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound + random.poisson(key, np.array(5), shape=size)\n elif isinstance(constraint, constraints.interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=lower_bound, maxval=upper_bound)\n elif constraint in (constraints.real, constraints.real_vector):\n return random.normal(key, size)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(\n key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]\n )\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(\n random.uniform(\n key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1\n )\n )\n elif constraint is constraints.corr_matrix:\n cholesky = signed_stick_breaking_tril(\n random.uniform(\n key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1\n )\n )\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return jnp.tril(random.uniform(key, size))\n elif constraint is constraints.positive_definite:\n x = random.normal(key, size)\n return jnp.matmul(x, jnp.swapaxes(x, -2, -1))\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x - random.normal(key, size[:-1] + (1,))\n elif isinstance(constraint, constraints.independent):\n return gen_values_within_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n return x / jnp.linalg.norm(x, axis=-1)\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [0, (-1) ** sign * 0.5]\n return random.uniform(key, size, float, *sorted(bounds))\n\n else:\n raise NotImplementedError(\"{} not implemented.\".format(constraint))\n\n\ndef gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size) - 2\n elif isinstance(constraint, constraints.greater_than):\n return constraint.lower_bound - jnp.exp(random.normal(key, size))\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n return random.randint(key, size, lower_bound - 1, lower_bound)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound - random.poisson(key, np.array(5), shape=size)\n elif isinstance(constraint, constraints.interval):\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=upper_bound, 
maxval=upper_bound + 1.0)\n elif constraint in [constraints.real, constraints.real_vector]:\n return lax.full(size, np.nan)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]) + 1e-2\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return (\n multinomial(\n key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]\n )\n + 1\n )\n elif constraint is constraints.corr_cholesky:\n return (\n signed_stick_breaking_tril(\n random.uniform(\n key,\n size[:-2] + (size[-1] * (size[-1] - 1) // 2,),\n minval=-1,\n maxval=1,\n )\n )\n + 1e-2\n )\n elif constraint is constraints.corr_matrix:\n cholesky = 1e-2 + signed_stick_breaking_tril(\n random.uniform(\n key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1\n )\n )\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return random.uniform(key, size)\n elif constraint is constraints.positive_definite:\n return random.normal(key, size)\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x[..., ::-1]\n elif isinstance(constraint, constraints.independent):\n return gen_values_outside_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)\n return 2 * x\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]\n return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError(\"{} not implemented.\".format(constraint))\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\[email protected](\"prepend_shape\", [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = prepend_shape + jax_dist.batch_shape + jax_dist.event_shape\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if (\n sp_dist\n and not _is_batched_multivariate(jax_dist)\n and not isinstance(jax_dist, dist.MultivariateStudentT)\n ):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif (\n sp_dist\n and not _is_batched_multivariate(jax_dist)\n and isinstance(jax_dist, dist.MultivariateStudentT)\n ):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = (1) if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(\n jax_dist.precision_matrix,\n jnp.linalg.inv(jax_dist.covariance_matrix),\n rtol=1e-6,\n )\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, \"shape\", ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = jax_dist(*params)\n try:\n 
expected_batch_shape, expected_event_shape = type(jax_dist).infer_shapes(\n            *shapes\n        )\n    except NotImplementedError:\n        pytest.skip(f\"{type(jax_dist).__name__}.infer_shapes() is not implemented\")\n    assert jax_dist.batch_shape == expected_batch_shape\n    assert jax_dist.event_shape == expected_event_shape\n\n\n@pytest.mark.parametrize(\n    \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_has_rsample(jax_dist, sp_dist, params):\n    jax_dist = jax_dist(*params)\n    masked_dist = jax_dist.mask(False)\n    indept_dist = jax_dist.expand_by([2]).to_event(1)\n    transf_dist = dist.TransformedDistribution(jax_dist, biject_to(constraints.real))\n    assert masked_dist.has_rsample == jax_dist.has_rsample\n    assert indept_dist.has_rsample == jax_dist.has_rsample\n    assert transf_dist.has_rsample == jax_dist.has_rsample\n\n    if jax_dist.has_rsample:\n        assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n        if isinstance(jax_dist, dist.TransformedDistribution):\n            assert jax_dist.base_dist.has_rsample\n        else:\n            assert set(jax_dist.arg_constraints) == set(jax_dist.reparametrized_params)\n        jax_dist.rsample(random.PRNGKey(0))\n        if isinstance(jax_dist, dist.Normal):\n            masked_dist.rsample(random.PRNGKey(0))\n            indept_dist.rsample(random.PRNGKey(0))\n            transf_dist.rsample(random.PRNGKey(0))\n    else:\n        with pytest.raises(NotImplementedError):\n            jax_dist.rsample(random.PRNGKey(0))\n        if isinstance(jax_dist, dist.BernoulliProbs):\n            with pytest.raises(NotImplementedError):\n                masked_dist.rsample(random.PRNGKey(0))\n            with pytest.raises(NotImplementedError):\n                indept_dist.rsample(random.PRNGKey(0))\n            with pytest.raises(NotImplementedError):\n                transf_dist.rsample(random.PRNGKey(0))\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (4,), (3, 2)])\ndef test_unit(batch_shape):\n    log_factor = random.normal(random.PRNGKey(0), batch_shape)\n    d = dist.Unit(log_factor=log_factor)\n    x = d.sample(random.PRNGKey(1))\n    assert x.shape == batch_shape + (0,)\n    assert (d.log_prob(x) == log_factor).all()\n\n\n@pytest.mark.parametrize(\"jax_dist, sp_dist, params\", CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n    # we have pathwise gradient for gamma sampler\n    gamma_derived_params = {\n        \"Gamma\": [\"concentration\"],\n        \"Beta\": [\"concentration1\", \"concentration0\"],\n        \"BetaProportion\": [\"mean\", \"concentration\"],\n        \"Chi2\": [\"df\"],\n        \"Dirichlet\": [\"concentration\"],\n        \"InverseGamma\": [\"concentration\"],\n        \"LKJ\": [\"concentration\"],\n        \"LKJCholesky\": [\"concentration\"],\n        \"StudentT\": [\"df\"],\n    }.get(jax_dist.__name__, [])\n\n    dist_args = [\n        p\n        for p in (\n            inspect.getfullargspec(jax_dist.__init__)[0][1:]\n            if inspect.isclass(jax_dist)\n            # account for the case jax_dist is a function\n            else inspect.getfullargspec(jax_dist)[0]\n        )\n    ]\n    params_dict = dict(zip(dist_args[: len(params)], params))\n\n    jax_class = type(jax_dist(**params_dict))\n    reparametrized_params = [\n        p for p in jax_class.reparametrized_params if p not in gamma_derived_params\n    ]\n    if not reparametrized_params:\n        pytest.skip(\"{} not reparametrized.\".format(jax_class.__name__))\n\n    nonrepara_params_dict = {\n        k: v for k, v in params_dict.items() if k not in reparametrized_params\n    }\n    repara_params = tuple(\n        v for k, v in params_dict.items() if k in reparametrized_params\n    )\n\n    rng_key = random.PRNGKey(0)\n\n    def fn(args):\n        args_dict = dict(zip(reparametrized_params, args))\n        return jnp.sum(\n            jax_dist(**args_dict, **nonrepara_params_dict).sample(key=rng_key)\n        )\n\n    actual_grad = jax.grad(fn)(repara_params)\n    assert 
len(actual_grad) == len(repara_params)\n\n eps = 1e-3\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [p if j != i else p - eps for j, p in enumerate(repara_params)]\n args_rhs = [p if j != i else p + eps for j, p in enumerate(repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n # finite diff approximation\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02, atol=0.03)\n\n\[email protected](\n \"jax_dist, params\",\n [\n (dist.Gamma, (1.0,)),\n (dist.Gamma, (0.1,)),\n (dist.Gamma, (10.0,)),\n (dist.Chi2, (1.0,)),\n (dist.Chi2, (0.1,)),\n (dist.Chi2, (10.0,)),\n (dist.Beta, (1.0, 1.0)),\n (dist.StudentT, (5.0, 2.0, 4.0)),\n ],\n)\ndef test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n return (z + z**2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean**2\n\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_jit_log_likelihood(jax_dist, sp_dist, params):\n if jax_dist.__name__ in (\n \"EulerMaruyama\",\n \"GaussianRandomWalk\",\n \"_ImproperWrapper\",\n \"LKJ\",\n \"LKJCholesky\",\n \"_SparseCAR\",\n ):\n pytest.xfail(reason=\"non-jittable params\")\n\n rng_key = random.PRNGKey(0)\n samples = jax_dist(*params).sample(key=rng_key, sample_shape=(2, 3))\n\n def log_likelihood(*params):\n return jax_dist(*params).log_prob(samples)\n\n expected = log_likelihood(*params)\n actual = jax.jit(log_likelihood)(*params)\n assert_allclose(actual, expected, atol=2e-5, rtol=2e-5)\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\[email protected](\"prepend_shape\", [(), (2,), (2, 3)])\[email protected](\"jit\", [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (\n dist.LeftTruncatedDistribution,\n dist.RightTruncatedDistribution,\n dist.TwoSidedTruncatedDistribution,\n )\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], dist.Distribution):\n # new api\n loc, scale, low, high = (\n params[0].loc,\n params[0].scale,\n params[1],\n params[2],\n )\n else:\n # old api\n loc, scale, low, high = params\n if low is None:\n low = -np.inf\n if high is None:\n high = np.inf\n sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n expected = sp_dist.logpdf(samples) - jnp.log(\n sp_dist.cdf(high) - sp_dist.cdf(low)\n )\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)\n return\n pytest.skip(\"no corresponding scipy distn.\")\n if _is_batched_multivariate(jax_dist):\n pytest.skip(\"batching not allowed in multivariate distns.\")\n if jax_dist.event_shape and prepend_shape:\n # >>> d = sp.dirichlet([1.1, 1.1])\n # >>> samples = d.rvs(size=(2,))\n # >>> d.logpdf(samples)\n # ValueError: The input vector 'x' must lie within the normal simplex ...\n pytest.skip(\"batched samples cannot 
be scored by multivariate distributions.\")\n sp_dist = sp_dist(*params)\n try:\n expected = sp_dist.logpdf(samples)\n except AttributeError:\n expected = sp_dist.logpmf(samples)\n except ValueError as e:\n # precision issue: jnp.sum(x / jnp.sum(x)) = 0.99999994 != 1\n if \"The input vector 'x' must lie within the normal simplex.\" in str(e):\n samples = jax.device_get(samples).astype(\"float64\")\n samples = samples / samples.sum(axis=-1, keepdims=True)\n expected = sp_dist.logpdf(samples)\n else:\n raise e\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)\n\n\ndef test_mixture_log_prob():\n gmm = dist.MixtureSameFamily(\n dist.Categorical(logits=np.zeros(2)), dist.Normal(0, 1).expand([2])\n )\n actual = gmm.log_prob(0.0)\n expected = dist.Normal(0, 1).log_prob(0.0)\n assert_allclose(actual, expected)\n\n\[email protected](\n \"jax_dist, sp_dist, params\",\n # TODO: add more complete pattern for Discrete.cdf\n CONTINUOUS + [T(dist.Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))],\n)\[email protected](\"ignore:overflow encountered:RuntimeWarning\")\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n if d.event_dim > 0:\n pytest.skip(\"skip testing cdf/icdf methods of multivariate distributions\")\n samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n try:\n rtol = 2e-3 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-5\n if d.shape() == () and not d.is_discrete:\n assert_allclose(\n jax.vmap(jax.grad(d.cdf))(samples),\n jnp.exp(d.log_prob(samples)),\n atol=1e-5,\n rtol=rtol,\n )\n assert_allclose(\n jax.vmap(jax.grad(d.icdf))(quantiles),\n jnp.exp(-d.log_prob(d.icdf(quantiles))),\n atol=1e-5,\n rtol=rtol,\n )\n assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-5, rtol=1e-5)\n assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-5, rtol=rtol)\n except NotImplementedError:\n pass\n\n # test against scipy\n if not sp_dist:\n pytest.skip(\"no corresponding scipy distn.\")\n sp_dist = sp_dist(*params)\n try:\n actual_cdf = d.cdf(samples)\n expected_cdf = sp_dist.cdf(samples)\n assert_allclose(actual_cdf, expected_cdf, atol=1e-5, rtol=1e-5)\n actual_icdf = d.icdf(quantiles)\n expected_icdf = sp_dist.ppf(quantiles)\n assert_allclose(actual_icdf, expected_icdf, atol=1e-4, rtol=1e-4)\n except NotImplementedError:\n pass\n\n\[email protected](\"jax_dist, sp_dist, params\", CONTINUOUS + DIRECTIONAL)\ndef test_gof(jax_dist, sp_dist, params):\n if \"Improper\" in jax_dist.__name__:\n pytest.skip(\"distribution has improper .log_prob()\")\n if \"LKJ\" in jax_dist.__name__:\n pytest.xfail(\"incorrect submanifold scaling\")\n if jax_dist is dist.EulerMaruyama:\n d = jax_dist(*params)\n if d.event_dim > 1:\n pytest.skip(\"EulerMaruyama skip test when event shape is non-trivial.\")\n\n num_samples = 10000\n if \"BetaProportion\" in jax_dist.__name__:\n num_samples = 20000\n rng_key = random.PRNGKey(0)\n d = jax_dist(*params)\n samples = d.sample(key=rng_key, sample_shape=(num_samples,))\n probs = np.exp(d.log_prob(samples))\n\n dim = None\n if jax_dist is dist.ProjectedNormal:\n dim = samples.shape[-1] - 1\n\n # Test each batch independently.\n probs = probs.reshape(num_samples, -1)\n samples = samples.reshape(probs.shape + d.event_shape)\n if \"Dirichlet\" in jax_dist.__name__:\n # The Dirichlet density is over all but one of the probs.\n samples = samples[..., :-1]\n for b in range(probs.shape[1]):\n try:\n gof = auto_goodness_of_fit(samples[:, b], 
probs[:, b], dim=dim)\n        except InvalidTest:\n            pytest.skip(\"expensive test\")\n        else:\n            assert gof > TEST_FAILURE_RATE\n\n\n@pytest.mark.parametrize(\"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE)\ndef test_independent_shape(jax_dist, sp_dist, params):\n    d = jax_dist(*params)\n    batch_shape, event_shape = d.batch_shape, d.event_shape\n    shape = batch_shape + event_shape\n    for i in range(len(batch_shape)):\n        indep = dist.Independent(d, reinterpreted_batch_ndims=i)\n        sample = indep.sample(random.PRNGKey(0))\n        event_boundary = len(shape) - len(event_shape) - i\n        assert indep.batch_shape == shape[:event_boundary]\n        assert indep.event_shape == shape[event_boundary:]\n        assert jnp.shape(indep.log_prob(sample)) == shape[:event_boundary]\n\n\ndef _tril_cholesky_to_tril_corr(x):\n    w = vec_to_tril_matrix(x, diagonal=-1)\n    diag = jnp.sqrt(1 - jnp.sum(w**2, axis=-1))\n    cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])\n    corr = jnp.matmul(cholesky, cholesky.T)\n    return matrix_to_tril_vec(corr, diagonal=-1)\n\n\n@pytest.mark.parametrize(\"dimension\", [2, 3, 5])\ndef test_log_prob_LKJCholesky_uniform(dimension):\n    # When concentration=1, the distribution of correlation matrices is uniform.\n    # We will test that fact here.\n    d = dist.LKJCholesky(dimension=dimension, concentration=1)\n    N = 5\n    corr_log_prob = []\n    for i in range(N):\n        sample = d.sample(random.PRNGKey(i))\n        log_prob = d.log_prob(sample)\n        sample_tril = matrix_to_tril_vec(sample, diagonal=-1)\n        cholesky_to_corr_jac = np.linalg.slogdet(\n            jax.jacobian(_tril_cholesky_to_tril_corr)(sample_tril)\n        )[1]\n        corr_log_prob.append(log_prob - cholesky_to_corr_jac)\n\n    corr_log_prob = np.array(corr_log_prob)\n    # test that they are constant\n    assert_allclose(\n        corr_log_prob,\n        jnp.broadcast_to(corr_log_prob[0], corr_log_prob.shape),\n        rtol=1e-6,\n    )\n\n    if dimension == 2:\n        # When concentration = 1, LKJ gives a uniform distribution over correlation\n        # matrices; hence, for dimension = 2, the density of a correlation matrix\n        # is that of Uniform(-1, 1), i.e. 0.5.\n        # In addition, the jacobian of the transformation from cholesky -> corr is 1\n        # (hence its log value is 0) because the off-diagonal lower triangular element\n        # does not change in the transform.\n        # So target_log_prob = log(0.5)\n        assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-6)\n\n\n
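# NOTE: illustrative sketch, not part of the original suite (the leading\n# underscore keeps pytest from collecting it). For dimension 2 and concentration\n# 1, the uniformity checked above means the off-diagonal entry of a sampled\n# correlation matrix is ~ Uniform(-1, 1); this spells that out with a KS test,\n# reusing only this module's imports (dist, osp, np, random).\ndef _lkj_uniform_offdiag_sketch():\n    corr = dist.LKJ(2, concentration=1.0).sample(random.PRNGKey(0), (5_000,))\n    # the single off-diagonal entry of each sampled 2x2 correlation matrix\n    ks_result = osp.kstest(np.asarray(corr[..., 0, 1]), osp.uniform(-1, 2).cdf)\n    assert ks_result.pvalue > 0.01\n\n\n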
@pytest.mark.parametrize(\"dimension\", [2, 3, 5])\n@pytest.mark.parametrize(\"concentration\", [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n    # We will test against the fact that LKJCorrCholesky can be seen as a\n    # TransformedDistribution whose base distribution is a distribution of partial\n    # correlations in the C-vine method (modulo an affine transform to change the\n    # domain from (0, 1) to (-1, 1)) and whose transform is a signed stick-breaking\n    # process.\n    d = dist.LKJCholesky(dimension, concentration, sample_method=\"cvine\")\n\n    beta_sample = d._beta.sample(random.PRNGKey(0))\n    beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n    partial_correlation = 2 * beta_sample - 1\n    affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n    sample = signed_stick_breaking_tril(partial_correlation)\n\n    # compute signed stick breaking logdet\n    inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2  # noqa: E731\n    inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(partial_correlation)))\n    unconstrained = inv_tanh(partial_correlation)\n    corr_cholesky_logdet = biject_to(constraints.corr_cholesky).log_abs_det_jacobian(\n        unconstrained, sample\n    )\n    signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n\n    actual_log_prob = d.log_prob(sample)\n    expected_log_prob = beta_log_prob - affine_logdet - signed_stick_breaking_logdet\n    assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-5)\n\n    assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-6)\n\n\ndef test_zero_inflated_logits_probs_agree():\n    concentration = np.exp(np.random.normal(1))\n    rate = np.exp(np.random.normal(1))\n    d = dist.GammaPoisson(concentration, rate)\n    gate_logits = np.random.normal(0)\n    gate_probs = expit(gate_logits)\n    zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n    zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n    sample = np.random.randint(\n        0,\n        20,\n        (\n            1000,\n            100,\n        ),\n    )\n    assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n@pytest.mark.parametrize(\"rate\", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])\ndef test_ZIP_log_prob(rate):\n    # if gate is 0 ZIP is Poisson\n    zip_ = dist.ZeroInflatedPoisson(0.0, rate)\n    pois = dist.Poisson(rate)\n    s = zip_.sample(random.PRNGKey(0), (20,))\n    zip_prob = zip_.log_prob(s)\n    pois_prob = pois.log_prob(s)\n    assert_allclose(zip_prob, pois_prob, rtol=1e-6)\n\n    # if gate is 1 ZIP is Delta(0)\n    zip_ = dist.ZeroInflatedPoisson(1.0, rate)\n    delta = dist.Delta(0.0)\n    s = np.array([0.0, 1.0])\n    zip_prob = zip_.log_prob(s)\n    delta_prob = delta.log_prob(s)\n    assert_allclose(zip_prob, delta_prob, rtol=1e-6)\n\n\n@pytest.mark.parametrize(\"total_count\", [1, 2, 3, 10])\n@pytest.mark.parametrize(\"shape\", [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n    concentration0 = np.exp(np.random.normal(size=shape))\n    concentration1 = np.exp(np.random.normal(size=shape))\n    value = jnp.arange(1 + total_count)\n\n    num_samples = 100000\n    probs = np.random.beta(concentration1, concentration0, size=(num_samples,) + shape)\n    log_probs = dist.Binomial(total_count, probs).log_prob(value)\n    expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n\n    actual = dist.BetaBinomial(concentration1, concentration0, total_count).log_prob(\n        value\n    )\n    assert_allclose(actual, expected, rtol=0.02)\n\n\n@pytest.mark.parametrize(\"total_count\", [1, 2, 3, 10])\n@pytest.mark.parametrize(\"batch_shape\", [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n    event_shape = (3,)\n    concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n    # test on one-hots\n    value = total_count * jnp.eye(event_shape[-1]).reshape(\n        event_shape + (1,) * len(batch_shape) + event_shape\n    )\n\n    num_samples = 100000\n    probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (num_samples, 1))\n    log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n    expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n\n    actual = dist.DirichletMultinomial(concentration, total_count).log_prob(value)\n    assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize(\"shape\", [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n    gamma_conc = np.exp(np.random.normal(size=shape))\n    gamma_rate = np.exp(np.random.normal(size=shape))\n    value = jnp.arange(15)\n\n    num_samples = 300000\n    poisson_rate = np.random.gamma(\n        gamma_conc, 1 / gamma_rate, size=(num_samples,) + shape\n    )\n    log_probs = dist.Poisson(poisson_rate).log_prob(value)\n    expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n    actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n    assert_allclose(actual, expected, rtol=0.05)\n\n\n@pytest.mark.parametrize(\n    \"jax_dist, sp_dist, params\", CONTINUOUS + 
DISCRETE + DIRECTIONAL\n)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n    if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n        pytest.skip(\"we have separate tests for the LKJCholesky distribution\")\n    if jax_dist is _ImproperWrapper:\n        pytest.skip(\"no param for ImproperUniform to test for log_prob gradient\")\n\n    rng_key = random.PRNGKey(0)\n    value = jax_dist(*params).sample(rng_key)\n\n    def fn(*args):\n        return jnp.sum(jax_dist(*args).log_prob(value))\n\n    eps = 1e-3\n    for i in range(len(params)):\n        if jax_dist is dist.EulerMaruyama and i == 1:\n            # skip taking grad w.r.t. sde_fn\n            continue\n        if jax_dist is _SparseCAR and i == 3:\n            # skip taking grad w.r.t. adj_matrix\n            continue\n        if isinstance(\n            params[i], dist.Distribution\n        ):  # skip taking grad w.r.t. base_dist\n            continue\n        if params[i] is None or jnp.result_type(params[i]) in (jnp.int32, jnp.int64):\n            continue\n        actual_grad = jax.grad(fn, i)(*params)\n        args_lhs = [p if j != i else p - eps for j, p in enumerate(params)]\n        args_rhs = [p if j != i else p + eps for j, p in enumerate(params)]\n        fn_lhs = fn(*args_lhs)\n        fn_rhs = fn(*args_rhs)\n        # finite diff approximation\n        expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n        assert jnp.shape(actual_grad) == jnp.shape(params[i])\n        if i == 0 and jax_dist is dist.Delta:\n            # grad w.r.t. `value` of Delta distribution will be 0\n            # but numerical value will give nan (= inf - inf)\n            expected_grad = 0.0\n        assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01, atol=0.01)\n\n\n@pytest.mark.parametrize(\n    \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_mean_var(jax_dist, sp_dist, params):\n    if jax_dist is _ImproperWrapper:\n        pytest.skip(\"Improper distribution does not have mean/var implemented\")\n    if jax_dist is FoldedNormal:\n        pytest.skip(\"Folded distribution does not have mean/var implemented\")\n    if jax_dist is dist.EulerMaruyama:\n        pytest.skip(\"EulerMaruyama distribution does not have mean/var implemented\")\n    if jax_dist is dist.RelaxedBernoulliLogits:\n        pytest.skip(\"RelaxedBernoulli distribution does not have mean/var implemented\")\n    if \"SineSkewed\" in jax_dist.__name__:\n        pytest.skip(\"Skewed distributions are not symmetric about location.\")\n    if jax_dist in (\n        _TruncatedNormal,\n        _TruncatedCauchy,\n        dist.LeftTruncatedDistribution,\n        dist.RightTruncatedDistribution,\n        dist.TwoSidedTruncatedDistribution,\n    ):\n        pytest.skip(\"Truncated distributions do not have mean/var implemented\")\n    if jax_dist is dist.ProjectedNormal:\n        pytest.skip(\"Mean is defined on the submanifold\")\n\n    n = (\n        20000\n        if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.SineBivariateVonMises]\n        else 200000\n    )\n    d_jax = jax_dist(*params)\n    k = random.PRNGKey(0)\n    samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)\n    # check with suitable scipy implementation if available\n    # XXX: VonMises is already tested below\n    if (\n        sp_dist\n        and not _is_batched_multivariate(d_jax)\n        and jax_dist\n        not in [dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]\n    ):\n        d_sp = sp_dist(*params)\n        try:\n            sp_mean = d_sp.mean()\n        except TypeError:  # mvn does not have .mean() method\n            sp_mean = d_sp.mean\n        # for multivariate distns try .cov first\n        if d_jax.event_shape:\n            try:\n                sp_var = jnp.diag(d_sp.cov())\n            except TypeError:  # mvn does not have .cov() method\n                sp_var = jnp.diag(d_sp.cov)\n            except AttributeError:\n                sp_var = d_sp.var()\n        else:\n            sp_var = d_sp.var()\n        assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-7)\n        assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-7)\n        if 
jnp.all(jnp.isfinite(sp_mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)\n if jnp.all(jnp.isfinite(sp_var)):\n assert_allclose(\n jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2\n )\n elif jax_dist in [dist.LKJ, dist.LKJCholesky]:\n if jax_dist is dist.LKJCholesky:\n corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))\n else:\n corr_samples = samples\n dimension, concentration, _ = params\n # marginal of off-diagonal entries\n marginal = dist.Beta(\n concentration + 0.5 * (dimension - 2), concentration + 0.5 * (dimension - 2)\n )\n # scale statistics due to linear mapping\n marginal_mean = 2 * marginal.mean - 1\n marginal_std = 2 * jnp.sqrt(marginal.variance)\n expected_mean = jnp.broadcast_to(\n jnp.reshape(marginal_mean, jnp.shape(marginal_mean) + (1, 1)),\n jnp.shape(marginal_mean) + d_jax.event_shape,\n )\n expected_std = jnp.broadcast_to(\n jnp.reshape(marginal_std, jnp.shape(marginal_std) + (1, 1)),\n jnp.shape(marginal_std) + d_jax.event_shape,\n )\n # diagonal elements of correlation matrices are 1\n expected_mean = expected_mean * (1 - jnp.identity(dimension)) + jnp.identity(\n dimension\n )\n expected_std = expected_std * (1 - jnp.identity(dimension))\n\n assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol=0.01)\n assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)\n elif jax_dist in [dist.VonMises]:\n # circular mean = sample mean\n assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=1e-2)\n\n # circular variance\n x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)\n\n expected_variance = 1 - jnp.sqrt(x**2 + y**2)\n assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=1e-2)\n elif jax_dist in [dist.SineBivariateVonMises]:\n phi_loc = _circ_mean(samples[..., 0])\n psi_loc = _circ_mean(samples[..., 1])\n\n assert_allclose(\n d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1), rtol=0.05, atol=1e-2\n )\n elif jax_dist in [dist.MatrixNormal]:\n sample_shape = (200_000,)\n # use X ~ MN(loc, U, V) then vec(X) ~ MVN(vec(loc), kron(V, U))\n if len(d_jax.batch_shape) > 0:\n axes = [len(sample_shape) + i for i in range(len(d_jax.batch_shape))]\n axes = tuple(axes)\n samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))\n subshape = samples_re.shape[: len(axes)]\n ixi = product(*[range(k) for k in subshape])\n for ix in ixi:\n # mean\n def get_min_shape(ix, batch_shape):\n return min(ix, tuple(map(lambda x: x - 1, batch_shape)))\n\n ix_loc = get_min_shape(ix, d_jax.loc.shape[: len(ix)])\n jnp.allclose(\n jnp.mean(samples_re[ix], 0),\n jnp.squeeze(d_jax.mean[ix_loc]),\n rtol=0.5,\n atol=1e-2,\n )\n # cov\n samples_mvn = jnp.squeeze(samples_re[ix]).reshape(\n sample_shape + (-1,), order=\"F\"\n )\n ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[: len(ix)])\n ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[: len(ix)])\n scale_tril = my_kron(\n d_jax.scale_tril_column[ix_col],\n d_jax.scale_tril_row[ix_row],\n )\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)\n else: # unbatched\n # mean\n jnp.allclose(\n jnp.mean(samples, 0),\n jnp.squeeze(d_jax.mean),\n rtol=0.5,\n atol=1e-2,\n )\n # cov\n samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,), order=\"F\")\n scale_tril = my_kron(\n jnp.squeeze(d_jax.scale_tril_column), jnp.squeeze(d_jax.scale_tril_row)\n )\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n 
jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)\n else:\n if jnp.all(jnp.isfinite(d_jax.mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)\n if isinstance(d_jax, dist.CAR):\n pytest.skip(\"CAR distribution does not have `variance` implemented.\")\n if isinstance(d_jax, dist.Gompertz):\n pytest.skip(\"Gompertz distribution does not have `variance` implemented.\")\n if jnp.all(jnp.isfinite(d_jax.variance)):\n assert_allclose(\n jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2\n )\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\[email protected](\"prepend_shape\", [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (\n _TruncatedNormal,\n _TruncatedCauchy,\n _GaussianMixture,\n _Gaussian2DMixture,\n _GeneralMixture,\n _General2DMixture,\n ):\n pytest.skip(f\"{jax_dist.__name__} is a function, not a class\")\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if (\n jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky)\n and dist_args[i] != \"concentration\"\n ):\n continue\n if \"SineSkewed\" in jax_dist.__name__ and dist_args[i] != \"skewness\":\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != \"t\":\n continue\n if (\n jax_dist is dist.TwoSidedTruncatedDistribution\n and dist_args[i] == \"base_dist\"\n ):\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == \"num_steps\":\n continue\n if (\n jax_dist is dist.SineBivariateVonMises\n and dist_args[i] == \"weighted_correlation\"\n ):\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(\n constraint, jnp.shape(params[i]), key_gen\n )\n valid_params[i] = gen_values_within_bounds(\n constraint, jnp.shape(params[i]), key_gen\n )\n if jax_dist is dist.MultivariateStudentT:\n # As mean is only defined for df > 1 & we instantiate\n # scipy.stats.multivariate_t with same mean as jax_dist\n # we need to ensure this is defined, so force df >= 1\n valid_params[0] += 1\n\n if jax_dist is dist.LogUniform:\n # scipy.stats.loguniform take parameter a and b\n # which is a > 0 and b > a.\n # gen_values_within_bounds() generates just\n # a > 0 and b > 0. 
Then, make b = a + b.\n valid_params[1] += valid_params[0]\n\n assert jax_dist(*oob_params)\n\n # Invalid parameter values throw ValueError\n if not dependent_constraint and (\n jax_dist is not _ImproperWrapper and \"SineSkewed\" not in jax_dist.__name__\n ):\n with pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n\n with pytest.raises(ValueError):\n # test error raised under jit omnistaging\n oob_params = jax.device_get(oob_params)\n\n def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n\n jax.jit(dist_gen_fn)()\n\n d = jax_dist(*valid_params, validate_args=True)\n\n # Test agreement of log density evaluation on randomly generated samples\n # with scipy's implementation when available.\n if (\n sp_dist\n and not _is_batched_multivariate(d)\n and not (d.event_shape and prepend_shape)\n ):\n valid_samples = gen_values_within_bounds(\n d.support, size=prepend_shape + d.batch_shape + d.event_shape\n )\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-5, rtol=1e-5)\n\n # Out of support samples throw ValueError\n oob_samples = gen_values_outside_bounds(\n d.support, size=prepend_shape + d.batch_shape + d.event_shape\n )\n with pytest.warns(UserWarning, match=\"Out-of-support\"):\n d.log_prob(oob_samples)\n\n with pytest.warns(UserWarning, match=\"Out-of-support\"):\n # test warning work under jit omnistaging\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n\n with pytest.raises(ValueError, match=\"got invalid\"):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n\n with pytest.warns(UserWarning, match=\"Out-of-support\"):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return (\n dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(data).sum()\n )\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data).sum()\n\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-6)\n assert_allclose(grad_fx, grad_gx, atol=1e-4)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(\n ValueError, match=r\"^BetaProportion distribution got invalid mean parameter\\.$\"\n ):\n dist.BetaProportion(1.0, 1.0)\n\n\n########################################\n# Tests for constraints and transforms #\n########################################\n\n\[email protected](\n \"constraint, x, expected\",\n [\n (constraints.boolean, np.array([True, False]), np.array([True, True])),\n (constraints.boolean, np.array([1, 1]), np.array([True, True])),\n (constraints.boolean, np.array([-1, 1]), np.array([False, True])),\n (\n constraints.corr_cholesky,\n np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),\n np.array([True, False]),\n ), # NB: not lower_triangular\n (\n constraints.corr_cholesky,\n np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),\n np.array([False, False]),\n ), # NB: not positive_diagonal & not 
unit_norm_row\n (\n constraints.corr_matrix,\n np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),\n np.array([True, False]),\n ), # NB: not lower_triangular\n (\n constraints.corr_matrix,\n np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),\n np.array([False, False]),\n ), # NB: not unit diagonal\n (constraints.greater_than(1), 3, True),\n (\n constraints.greater_than(1),\n np.array([-1, 1, 5]),\n np.array([False, False, True]),\n ),\n (constraints.integer_interval(-3, 5), 0, True),\n (\n constraints.integer_interval(-3, 5),\n np.array([-5, -3, 0, 1.1, 5, 7]),\n np.array([False, True, True, False, True, False]),\n ),\n (constraints.interval(-3, 5), 0, True),\n (\n constraints.interval(-3, 5),\n np.array([-5, -3, 0, 5, 7]),\n np.array([False, True, True, True, False]),\n ),\n (constraints.less_than(1), -2, True),\n (\n constraints.less_than(1),\n np.array([-1, 1, 5]),\n np.array([True, False, False]),\n ),\n (constraints.lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True),\n (\n constraints.lower_cholesky,\n np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),\n np.array([False, False]),\n ),\n (constraints.nonnegative_integer, 3, True),\n (\n constraints.nonnegative_integer,\n np.array([-1.0, 0.0, 5.0]),\n np.array([False, True, True]),\n ),\n (constraints.positive, 3, True),\n (constraints.positive, np.array([-1, 0, 5]), np.array([False, False, True])),\n (constraints.positive_definite, np.array([[1.0, 0.3], [0.3, 1.0]]), True),\n (\n constraints.positive_definite,\n np.array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]),\n np.array([False, False]),\n ),\n (constraints.positive_integer, 3, True),\n (\n constraints.positive_integer,\n np.array([-1.0, 0.0, 5.0]),\n np.array([False, False, True]),\n ),\n (constraints.real, -1, True),\n (\n constraints.real,\n np.array([np.inf, -np.inf, np.nan, np.pi]),\n np.array([False, False, False, True]),\n ),\n (constraints.simplex, np.array([0.1, 0.3, 0.6]), True),\n (\n constraints.simplex,\n np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),\n np.array([True, False, False]),\n ),\n (constraints.softplus_positive, 3, True),\n (\n constraints.softplus_positive,\n np.array([-1, 0, 5]),\n np.array([False, False, True]),\n ),\n (\n constraints.softplus_lower_cholesky,\n np.array([[1.0, 0.0], [-2.0, 0.1]]),\n True,\n ),\n (\n constraints.softplus_lower_cholesky,\n np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),\n np.array([False, False]),\n ),\n (constraints.unit_interval, 0.1, True),\n (\n constraints.unit_interval,\n np.array([-5, 0, 0.5, 1, 7]),\n np.array([False, True, True, True, False]),\n ),\n (\n constraints.sphere,\n np.array([[1, 0, 0], [0.5, 0.5, 0]]),\n np.array([True, False]),\n ),\n (\n constraints.open_interval(0.0, 1.0),\n np.array([-5, 0, 0.5, 1, 7]),\n np.array([False, False, True, False, False]),\n ),\n ],\n)\ndef test_constraints(constraint, x, expected):\n v = constraint.feasible_like(x)\n if jnp.result_type(v) == \"float32\" or jnp.result_type(v) == \"float64\":\n assert not constraint.is_discrete\n assert_array_equal(constraint(x), expected)\n\n feasible_value = constraint.feasible_like(x)\n assert jnp.shape(feasible_value) == jnp.shape(x)\n assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected), True))\n\n try:\n inverse = biject_to(constraint).inv(feasible_value)\n except NotImplementedError:\n pass\n else:\n assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-7)\n\n\[email protected](\n \"constraint\",\n [\n constraints.corr_cholesky,\n 
constraints.corr_matrix,\n constraints.greater_than(2),\n constraints.interval(-3, 5),\n constraints.l1_ball,\n constraints.less_than(1),\n constraints.lower_cholesky,\n constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector,\n constraints.positive,\n constraints.positive_definite,\n constraints.positive_ordered_vector,\n constraints.real,\n constraints.real_vector,\n constraints.simplex,\n constraints.softplus_positive,\n constraints.softplus_lower_cholesky,\n constraints.unit_interval,\n constraints.open_interval(0.0, 1.0),\n ],\n ids=lambda x: x.__class__,\n)\[email protected](\"shape\", [(), (1,), (3,), (6,), (3, 1), (1, 3), (5, 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n\n # test inv work for NaN arrays:\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n\n # test codomain\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=jnp.bool_))\n\n # test inv\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-5, rtol=1e-5)\n\n # test domain, currently all is constraints.real or constraints.real_vector\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n\n # test log_abs_det_jacobian\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)[:, :-1])[1]\n elif constraint in [\n constraints.real_vector,\n constraints.ordered_vector,\n constraints.positive_ordered_vector,\n constraints.l1_ball,\n ]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix]:\n vec_transform = lambda x: matrix_to_tril_vec( # noqa: E731\n transform(x), diagonal=-1\n )\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n # fill the upper triangular part\n matrix = (\n matrix\n + jnp.swapaxes(matrix, -2, -1)\n + jnp.identity(matrix.shape[-1])\n )\n return transform.inv(matrix)\n\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]\n elif constraint in [\n constraints.lower_cholesky,\n constraints.scaled_unit_lower_cholesky,\n constraints.positive_definite,\n constraints.softplus_lower_cholesky,\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x)) # noqa: E731\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if 
constraint is constraints.positive_definite:\n # fill the upper triangular part\n matrix = (\n matrix\n + jnp.swapaxes(matrix, -2, -1)\n - jnp.diag(jnp.diag(matrix))\n )\n return transform.inv(matrix)\n\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n\n assert_allclose(actual, expected, atol=1e-5, rtol=1e-5)\n assert_allclose(actual, -inv_expected, atol=1e-5, rtol=1e-5)\n\n\n# NB: skip transforms which are tested in `test_biject_to`\[email protected](\n \"transform, event_shape\",\n [\n (PermuteTransform(np.array([3, 0, 4, 1, 2])), (5,)),\n (PowerTransform(2.0), ()),\n (SoftplusTransform(), ()),\n (\n LowerCholeskyAffine(\n np.array([1.0, 2.0]), np.array([[0.6, 0.0], [1.5, 0.4]])\n ),\n (2,),\n ),\n (\n transforms.ComposeTransform(\n [\n biject_to(constraints.simplex),\n SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv,\n ]\n ),\n (5,),\n ),\n ],\n)\[email protected](\n \"batch_shape\",\n [\n (),\n (1,),\n (3,),\n (6,),\n (3, 1),\n (1, 3),\n (5, 3),\n ],\n)\ndef test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n\n # test codomain\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n\n # test inv\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-6, rtol=1e-4)\n assert transform.inv.inv is transform\n assert transform.inv is transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n\n # test domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n\n # test log_abs_det_jacobian\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n\n assert_allclose(actual, expected, atol=1e-6)\n assert_allclose(actual, -inv_expected, atol=1e-6)\n\n\[email protected](\"batch_shape\", [(), (5,)])\ndef test_composed_transform(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t1])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 2\n\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n expected_log_det = (\n jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2) + jnp.log(2) * 9\n )\n assert_allclose(log_det, expected_log_det)\n\n\[email protected](\"batch_shape\", [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = (\n jnp.log(2) * 
6\n + t2.log_abs_det_jacobian(x * 2, z)\n + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n )\n assert_allclose(log_det, expected_log_det)\n\n\[email protected](\"batch_shape\", [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\[email protected](\"batch_shape\", [(), (5,)])\[email protected](\"prepend_event_shape\", [(), (4,)])\[email protected](\"sample_shape\", [(), (7,)])\ndef test_transformed_distribution(batch_shape, prepend_event_shape, sample_shape):\n base_dist = (\n dist.Normal(0, 1)\n .expand(batch_shape + prepend_event_shape + (6,))\n .to_event(1 + len(prepend_event_shape))\n )\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n d = dist.TransformedDistribution(base_dist, [t1, t2, t1])\n assert d.event_dim == 2 + len(prepend_event_shape)\n\n y = d.sample(random.PRNGKey(0), sample_shape)\n t = transforms.ComposeTransform([t1, t2, t1])\n x = t.inv(y)\n assert x.shape == sample_shape + base_dist.shape()\n log_prob = d.log_prob(y)\n assert log_prob.shape == sample_shape + batch_shape\n t_log_det = t.log_abs_det_jacobian(x, y)\n if prepend_event_shape:\n t_log_det = t_log_det.sum(-1)\n expected_log_prob = base_dist.log_prob(x) - t_log_det\n assert_allclose(log_prob, expected_log_prob, atol=1e-5)\n\n\[email protected](\n \"transformed_dist\",\n [\n dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()\n ),\n dist.TransformedDistribution(\n dist.Exponential(jnp.ones(2)),\n [\n transforms.PowerTransform(0.7),\n transforms.AffineTransform(0.0, jnp.ones(2) * 3),\n ],\n ),\n ],\n)\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(\n random.PRNGKey(1)\n )\n assert_allclose(\n transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample),\n )\n\n\ndef test_transformed_transformed_distribution():\n loc, scale = -2, 3\n dist1 = dist.TransformedDistribution(\n dist.Normal(2, 3), transforms.PowerTransform(2.0)\n )\n dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(-2, 3))\n assert isinstance(dist2.base_dist, dist.Normal)\n assert len(dist2.transforms) == 2\n assert isinstance(dist2.transforms[0], transforms.PowerTransform)\n assert isinstance(dist2.transforms[1], transforms.AffineTransform)\n\n rng_key = random.PRNGKey(0)\n assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))\n intermediates = dist2.sample_with_intermediates(rng_key)\n assert len(intermediates) == 2\n\n\ndef _make_iaf(input_dim, hidden_dims, rng_key):\n arn_init, arn = AutoregressiveNN(input_dim, hidden_dims, param_dims=[1, 1])\n _, init_params = arn_init(rng_key, (input_dim,))\n return InverseAutoregressiveTransform(partial(arn, init_params))\n\n\[email protected](\n \"ts\",\n [\n [transforms.PowerTransform(0.7), transforms.AffineTransform(2.0, 3.0)],\n [transforms.ExpTransform()],\n [\n transforms.ComposeTransform(\n [transforms.AffineTransform(-2, 3), transforms.ExpTransform()]\n ),\n transforms.PowerTransform(3.0),\n ],\n [\n _make_iaf(5, hidden_dims=[10], rng_key=random.PRNGKey(0)),\n transforms.PermuteTransform(jnp.arange(5)[::-1]),\n _make_iaf(5, hidden_dims=[10], 
rng_key=random.PRNGKey(1)),\n        ],\n    ],\n)\ndef test_compose_transform_with_intermediates(ts):\n    transform = transforms.ComposeTransform(ts)\n    x = random.normal(random.PRNGKey(2), (7, 5))\n    y, intermediates = transform.call_with_intermediates(x)\n    logdet = transform.log_abs_det_jacobian(x, y, intermediates)\n    assert_allclose(y, transform(x))\n    assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))\n\n\n@pytest.mark.parametrize(\"x_dim, y_dim\", [(3, 3), (3, 4)])\ndef test_unpack_transform(x_dim, y_dim):\n    xy = np.random.randn(x_dim + y_dim)\n    unpack_fn = lambda xy: {\"x\": xy[:x_dim], \"y\": xy[x_dim:]}  # noqa: E731\n    transform = transforms.UnpackTransform(unpack_fn)\n    z = transform(xy)\n    if x_dim == y_dim:\n        with pytest.warns(UserWarning, match=\"UnpackTransform.inv\"):\n            t = transform.inv(z)\n    else:\n        t = transform.inv(z)\n\n    assert_allclose(t, xy)\n\n\n@pytest.mark.parametrize(\"jax_dist, sp_dist, params\", CONTINUOUS)\ndef test_generated_sample_distribution(\n    jax_dist, sp_dist, params, N_sample=100_000, key=random.PRNGKey(11)\n):\n    \"\"\"On samplers that we do not get directly from JAX (e.g. we only get\n    Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n    agreement in the empirical distribution of generated samples between our\n    samplers and those from SciPy.\n    \"\"\"\n\n    if jax_dist not in [dist.Gumbel]:\n        pytest.skip(\n            \"{} sampling method taken from upstream, no need to \"\n            \"test generated samples.\".format(jax_dist.__name__)\n        )\n\n    jax_dist = jax_dist(*params)\n    if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n        our_samples = jax_dist.sample(key, (N_sample,))\n        ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n        assert ks_result.pvalue > 0.05\n\n\n
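# NOTE: illustrative sketch, not part of the original suite (the leading\n# underscore keeps pytest from collecting it). The KS check above, written out\n# concretely for a location-scale Gumbel using this module's imports (dist, osp,\n# np, random).\ndef _gumbel_ks_sketch():\n    samples = dist.Gumbel(1.5, 2.0).sample(random.PRNGKey(11), (100_000,))\n    ks_result = osp.kstest(np.asarray(samples), osp.gumbel_r(1.5, 2.0).cdf)\n    assert ks_result.pvalue > 0.05\n\n\n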
@pytest.mark.parametrize(\n    \"jax_dist, params, support\",\n    [\n        (dist.BernoulliLogits, (5.0,), jnp.arange(2)),\n        (dist.BernoulliProbs, (0.5,), jnp.arange(2)),\n        (dist.BinomialLogits, (4.5, 10), jnp.arange(11)),\n        (dist.BinomialProbs, (0.5, 11), jnp.arange(12)),\n        (dist.BetaBinomial, (2.0, 0.5, 12), jnp.arange(13)),\n        (dist.CategoricalLogits, (np.array([3.0, 4.0, 5.0]),), jnp.arange(3)),\n        (dist.CategoricalProbs, (np.array([0.1, 0.5, 0.4]),), jnp.arange(3)),\n    ],\n)\n@pytest.mark.parametrize(\"batch_shape\", [(5,), ()])\n@pytest.mark.parametrize(\"expand\", [False, True])\ndef test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand):\n    p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))\n    actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)\n    expected = support.reshape((-1,) + (1,) * len(batch_shape))\n    if expand:\n        expected = jnp.broadcast_to(expected, support.shape + batch_shape)\n    assert_allclose(actual, expected)\n\n\ndef test_zero_inflated_enumerate_support():\n    base_dist = dist.Bernoulli(0.5)\n    d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)\n    assert d.has_enumerate_support\n    assert_allclose(d.enumerate_support(), base_dist.enumerate_support())\n\n\n@pytest.mark.parametrize(\"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE)\n@pytest.mark.parametrize(\"prepend_shape\", [(), (2, 3)])\n@pytest.mark.parametrize(\"sample_shape\", [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n    jax_dist = jax_dist(*params)\n    new_batch_shape = prepend_shape + jax_dist.batch_shape\n    expanded_dist = jax_dist.expand(new_batch_shape)\n    rng_key = random.PRNGKey(0)\n    samples = expanded_dist.sample(rng_key, sample_shape)\n    assert expanded_dist.batch_shape == new_batch_shape\n    assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n    assert expanded_dist.log_prob(samples).shape == sample_shape + new_batch_shape\n    # test expand of expand\n    assert (\n        expanded_dist.expand((3,) + new_batch_shape).batch_shape\n        == (3,) + new_batch_shape\n    )\n    # test expand error\n    if prepend_shape:\n        with pytest.raises(ValueError, match=\"Cannot broadcast distribution of shape\"):\n            assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\n@pytest.mark.parametrize(\"base_shape\", [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1, 5)])\n@pytest.mark.parametrize(\"event_dim\", [0, 1, 2, 3])\n@pytest.mark.parametrize(\"sample_shape\", [(1000,), (1000, 7, 1), (1000, 1, 7)])\ndef test_expand_shuffle_regression(base_shape, event_dim, sample_shape):\n    expand_shape = (2, 3, 5)\n    event_dim = min(event_dim, len(base_shape))\n    loc = random.normal(random.PRNGKey(0), base_shape) * 10\n    base_dist = dist.Normal(loc, 0.1).to_event(event_dim)\n    expanded_dist = base_dist.expand(expand_shape[: len(expand_shape) - event_dim])\n    samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)\n    expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.shape())\n    assert_allclose(samples.mean(0), expected_mean, atol=0.1)\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (4,), (10, 3)])\ndef test_sine_bivariate_von_mises_batch_shape(batch_shape):\n    phi_loc = jnp.broadcast_to(jnp.array(0.0), batch_shape)\n    psi_loc = jnp.array(0.0)\n    phi_conc = jnp.array(1.0)\n    psi_conc = jnp.array(1.0)\n    corr = jnp.array(0.1)\n\n    sine = SineBivariateVonMises(phi_loc, psi_loc, phi_conc, psi_conc, corr)\n    assert sine.batch_shape == batch_shape\n\n    samples = sine.sample(random.PRNGKey(0))\n    assert samples.shape == (*batch_shape, 2)\n\n\ndef test_sine_bivariate_von_mises_sample_mean():\n    loc = jnp.array([[2.0, -1.0], [-2, 1.0]])\n\n    sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)\n    samples = sine.sample(random.PRNGKey(0), (5000,))\n\n    assert_allclose(_circ_mean(samples).T, loc, rtol=5e-3)\n\n\n@pytest.mark.parametrize(\"batch_shape\", [(), (4,)])\ndef test_polya_gamma(batch_shape, num_points=20000):\n    d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)\n    rng_key = random.PRNGKey(0)\n\n    # test density approximately normalized\n    x = jnp.linspace(1.0e-6, d.truncation_point, num_points)\n    prob = (d.truncation_point / num_points) * jnp.exp(\n        logsumexp(d.log_prob(x), axis=-1)\n    )\n    assert_allclose(prob, jnp.ones(batch_shape), rtol=1.0e-4)\n\n    # test mean of approximate sampler\n    z = d.sample(rng_key, sample_shape=(3000,))\n    mean = jnp.mean(z, axis=-1)\n    assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)\n\n\n@pytest.mark.parametrize(\n    \"extra_event_dims,expand_shape\",\n    [(0, (4, 3, 2, 1)), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))],\n)\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n    loc = jnp.zeros((1, 6))\n    scale_tril = jnp.eye(6)\n    d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n    full_shape = (4, 1, 1, 1, 6)\n    reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n    cut = 4 - extra_event_dims\n    batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n    assert reshaped_dist.batch_shape == batch_shape\n    assert reshaped_dist.event_shape == event_shape\n    large = reshaped_dist.expand(expand_shape)\n    assert large.batch_shape == expand_shape\n    assert large.event_shape == event_shape\n\n    # Throws error when batch shape cannot be broadcast\n    with pytest.raises((RuntimeError, ValueError)):\n        reshaped_dist.expand(expand_shape + (3,))\n\n    # Throws error when trying to shrink existing batch shape\n    with pytest.raises((RuntimeError, ValueError)):\n        
large.expand(expand_shape[1:])\n\n\n@pytest.mark.parametrize(\n    \"batch_shape, mask_shape\",\n    [((), ()), ((2,), ()), ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))],\n)\n@pytest.mark.parametrize(\"event_shape\", [(), (3,)])\ndef test_mask(batch_shape, event_shape, mask_shape):\n    jax_dist = (\n        dist.Normal().expand(batch_shape + event_shape).to_event(len(event_shape))\n    )\n    mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)\n    if mask_shape == ():\n        mask = bool(mask)\n    samples = jax_dist.sample(random.PRNGKey(1))\n    actual = jax_dist.mask(mask).log_prob(samples)\n    assert_allclose(\n        actual != 0,\n        jnp.broadcast_to(mask, lax.broadcast_shapes(batch_shape, mask_shape)),\n    )\n\n\n@pytest.mark.parametrize(\"event_shape\", [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n    def f(x, data):\n        base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n        mask = jnp.all(\n            jnp.isfinite(data), tuple(-i - 1 for i in range(len(event_shape)))\n        )\n        log_prob = base_dist.mask(mask).log_prob(data)\n        assert log_prob.shape == data.shape[: len(data.shape) - len(event_shape)]\n        return log_prob.sum()\n\n    data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n    log_prob, grad = jax.value_and_grad(f)(1.0, data)\n    assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\n@pytest.mark.parametrize(\n    \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_dist_pytree(jax_dist, sp_dist, params):\n    def f(x):\n        return jax_dist(*params)\n\n    if jax_dist is _ImproperWrapper:\n        pytest.skip(\"Cannot flatten ImproperUniform\")\n    if jax_dist is dist.EulerMaruyama:\n        pytest.skip(\"EulerMaruyama doesn't define flatten/unflatten\")\n    jax.jit(f)(0)  # this tests flatten/unflatten\n    lax.map(f, np.ones(3))  # this tests compatibility w.r.t. scan\n    # Test that parameters do not change after flattening.\n    expected_dist = f(0)\n    actual_dist = jax.jit(f)(0)\n    expected_sample = expected_dist.sample(random.PRNGKey(0))\n    actual_sample = actual_dist.sample(random.PRNGKey(0))\n    expected_log_prob = expected_dist.log_prob(expected_sample)\n    actual_log_prob = actual_dist.log_prob(actual_sample)\n    assert_allclose(actual_sample, expected_sample, rtol=1e-6)\n    assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-6)\n\n\n@pytest.mark.parametrize(\n    \"method, arg\", [(\"to_event\", 1), (\"mask\", False), (\"expand\", [5])]\n)\ndef test_special_dist_pytree(method, arg):\n    def f(x):\n        d = dist.Normal(np.zeros(1), np.ones(1))\n        return getattr(d, method)(arg)\n\n    jax.jit(f)(0)\n    lax.map(f, np.ones(3))\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n    # ExpandedDistribution can mutate the `batch_shape` of\n    # its base distribution in order to make ExpandedDistribution\n    # mappable, see #684. 
However, this mutation should not take\n # place if no mapping operation is performed.\n\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n # Low level test: ensure that (tree_flatten o tree_unflatten)(expanded_dist)\n # amounts to an identity operation.\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n # High-level test: `jax.jit`ting a function returning an ExpandedDistribution\n # (which involves an instance of the low-level case as it will transform\n # the original function by adding some flattening and unflattening steps)\n # should return same object as its non-jitted equivalent.\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\[email protected](\"batch_shape\", [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\[email protected](\"batch_shape\", [(), (4,), (2, 3)], ids=str)\[email protected](\"event_shape\", [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(\n dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))\n )\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected](\"batch_shape\", [(), (4,), (2, 3)], ids=str)\[email protected](\"event_shape\", [(), (4,), (2, 3)], ids=str)\ndef test_kl_expanded_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)\n q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)\n actual = kl_divergence(\n dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))\n )\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected](\"shape\", [(), (4,), (2, 3)], ids=str)\[email protected](\n \"p_dist, q_dist\",\n [\n (dist.Beta, dist.Beta),\n (dist.Gamma, dist.Gamma),\n (dist.Kumaraswamy, dist.Beta),\n (dist.Normal, dist.Normal),\n 
(dist.Weibull, dist.Gamma),\n ],\n)\ndef test_kl_univariate(shape, p_dist, q_dist):\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f\"Missing pattern for param {k}.\")\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected](\"shape\", [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10_000,)).copy()\n expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n # test that vmapped binomial with p = 0 does not have an infinite loop\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n\n jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\ndef _get_vmappable_dist_init_params(jax_dist):\n if jax_dist.__name__ == (\"_TruncatedCauchy\"):\n return [2, 3]\n elif jax_dist.__name__ == (\"_TruncatedNormal\"):\n return [2, 3]\n elif issubclass(jax_dist, dist.Distribution):\n init_parameters = list(inspect.signature(jax_dist.__init__).parameters.keys())[\n 1:\n ]\n vmap_over_parameters = list(\n inspect.signature(vmap_over.dispatch(jax_dist)).parameters.keys()\n )[1:]\n return list(\n [\n i\n for i, name in enumerate(init_parameters)\n if name in vmap_over_parameters\n ]\n )\n else:\n raise ValueError\n\n\ndef _allclose_or_equal(a1, a2):\n if isinstance(a1, np.ndarray):\n return np.allclose(a2, a1)\n elif isinstance(a1, jnp.ndarray):\n return jnp.allclose(a2, a1)\n elif isinstance(a1, csr_matrix):\n return np.allclose(a2.todense(), a1.todense())\n else:\n return a2 == a1 or a2 is a1\n\n\ndef _tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[: len(params)]\n\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n\n d = make_jax_dist(*params)\n\n if isinstance(d, _SparseCAR) and d.is_sparse:\n # In this case, since csr arrays are not jittable,\n # _SparseCAR has a csr_matrix as part of its pytree\n # definition (not as a pytree leaf). 
This causes pytree\n # operations like tree_map to fail, since these functions\n # compare the pytree def of each of the arguments using ==\n # which is ambiguous for array-like objects.\n return\n\n in_out_axes_cases = [\n # vmap over all args\n (\n tuple(0 if i in vmappable_param_idxs else None for i in range(len(params))),\n 0,\n ),\n # vmap over a single arg, out over all attributes of a distribution\n *(\n ([0 if i == idx else None for i in range(len(params))], 0)\n for idx in vmappable_param_idxs\n if params[idx] is not None\n ),\n # vmap over a single arg, out over the associated attribute of the distribution\n *(\n (\n [0 if i == idx else None for i in range(len(params))],\n vmap_over(d, **{param_names[idx]: 0}),\n )\n for idx in vmappable_param_idxs\n if params[idx] is not None\n ),\n # vmap over a single arg, axis=1, (out single attribute, axis=1)\n *(\n (\n [1 if i == idx else None for i in range(len(params))],\n vmap_over(d, **{param_names[idx]: 1}),\n )\n for idx in vmappable_param_idxs\n if isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).ndim > 0\n # skip this distribution because _GeneralMixture.__init__ turns\n # 1d inputs into 0d attributes, thus breaks the expectations of\n # the vmapping test case where in_axes=1, only done for rank>=1 tensors.\n and jax_dist is not _GeneralMixture\n ),\n ]\n\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [\n jax.tree_map(lambda x: jnp.expand_dims(x, ax), arg)\n if isinstance(ax, int)\n else arg\n for arg, ax in zip(params, in_axes)\n ]\n # Recreate the jax_dist to avoid side effects coming from `d.sample`\n # triggering lazy_property computations, which, in a few cases, break\n # vmap_over's expectations regarding existing attributes to be vmapped.\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes)(\n *batched_params\n )\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d\n )\n assert eq == jnp.array([True])\n\n samples_dist = sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10).sample(\n key\n )\n\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-6)\n\n\ndef test_normal_log_cdf():\n # test if log_cdf method agrees with jax.scipy.stats.norm.logcdf\n # and if exp(log_cdf) agrees with cdf\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-6)\n\n\[email protected](\n \"value\",\n [\n -15.0,\n jnp.array([[-15.0], [-10.0], [-5.0]]),\n jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]]),\n ],\n)\ndef test_truncated_normal_log_prob_in_tail(value):\n # define set of distributions truncated in tail of distribution\n loc = 1.35\n scale = jnp.geomspace(0.01, 1, 10)\n low, high = (-20, -1.0)\n a, b = (low - loc) / scale, (high - loc) / scale # rescale for jax input\n\n 
numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high).log_prob(\n value\n )\n jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)\n assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)\n\n\ndef test_sample_truncated_normal_in_tail():\n # test, if samples from distributions truncated in\n # tail of distribution returns any inf's\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10_000,))\n assert ~jnp.isinf(samples).any()\n\n\[email protected]_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-ids": [
97,
100,
123,
125,
137
]
}
|
[
97,
100,
123,
125,
137
] |
import oneflow as flow
import torch
def convert_torch_to_flow(model, torch_weight_path, save_path):
    # Load the PyTorch checkpoint (a state dict mapping names to tensors).
    parameters = torch.load(torch_weight_path)
    new_parameters = dict()
    for key, value in parameters.items():
        # Skip BatchNorm's "num_batches_tracked" bookkeeping buffers; convert
        # every remaining tensor to a NumPy array that OneFlow can ingest.
        if "num_batches_tracked" not in key:
            val = value.detach().cpu().numpy()
            new_parameters[key] = val
    # Load the converted weights into the OneFlow model and save the result.
    model.load_state_dict(new_parameters)
    flow.save(model.state_dict(), save_path)
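
# Illustrative usage sketch (an addition, not from the original source): the
# toy model, checkpoint path, and save path below are assumptions only.
if __name__ == "__main__":
    import oneflow.nn as nn

    # A stand-in module whose parameter names would match a same-shaped
    # torch.nn.Sequential checkpoint saved beforehand.
    toy = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
    convert_torch_to_flow(toy, "toy_torch_weights.pth", "./toy_flow_weights")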
|
normal
|
{
"blob_id": "8a3cf65550893367b9001369111fa19a3e998d82",
"index": 9589,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef convert_torch_to_flow(model, torch_weight_path, save_path):\n parameters = torch.load(torch_weight_path)\n new_parameters = dict()\n for key, value in parameters.items():\n if 'num_batches_tracked' not in key:\n val = value.detach().cpu().numpy()\n new_parameters[key] = val\n model.load_state_dict(new_parameters)\n flow.save(model.state_dict(), save_path)\n",
"step-3": "import oneflow as flow\nimport torch\n\n\ndef convert_torch_to_flow(model, torch_weight_path, save_path):\n parameters = torch.load(torch_weight_path)\n new_parameters = dict()\n for key, value in parameters.items():\n if 'num_batches_tracked' not in key:\n val = value.detach().cpu().numpy()\n new_parameters[key] = val\n model.load_state_dict(new_parameters)\n flow.save(model.state_dict(), save_path)\n",
"step-4": "import oneflow as flow\nimport torch\n\ndef convert_torch_to_flow(model, torch_weight_path, save_path):\n parameters = torch.load(torch_weight_path)\n new_parameters = dict()\n for key, value in parameters.items():\n if \"num_batches_tracked\" not in key:\n val = value.detach().cpu().numpy()\n new_parameters[key] = val\n model.load_state_dict(new_parameters)\n flow.save(model.state_dict(), save_path)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Config(object):
_DEFAULT = {'url': 'http://localhost:9999/notify', 'title':
'IRC Notification', 'activate_label': '', 'sound': ''}
def __init__(self):
self._opts = {}
for opt, value in self._DEFAULT.items():
if not weechat.config_is_set_plugin(opt):
weechat.config_set_plugin(opt, value)
self.update()
def update(self):
for opt in self._DEFAULT.keys():
self._opts[opt] = weechat.config_get_plugin(opt)
def __getitem__(self, key):
return self._opts[key]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Config(object):
_DEFAULT = {'url': 'http://localhost:9999/notify', 'title':
'IRC Notification', 'activate_label': '', 'sound': ''}
def __init__(self):
self._opts = {}
for opt, value in self._DEFAULT.items():
if not weechat.config_is_set_plugin(opt):
weechat.config_set_plugin(opt, value)
self.update()
def update(self):
for opt in self._DEFAULT.keys():
self._opts[opt] = weechat.config_get_plugin(opt)
def __getitem__(self, key):
return self._opts[key]
def config_cb(data, option, value):
cfg.update()
return weechat.WEECHAT_RC_OK
def send_notify(**kwargs):
data = json.dumps(kwargs)
req = urllib2.Request(cfg['url'], data, {'Content-Type':
'application/json'})
f = urllib2.urlopen(req)
response = f.read()
f.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Config(object):
_DEFAULT = {'url': 'http://localhost:9999/notify', 'title':
'IRC Notification', 'activate_label': '', 'sound': ''}
def __init__(self):
self._opts = {}
for opt, value in self._DEFAULT.items():
if not weechat.config_is_set_plugin(opt):
weechat.config_set_plugin(opt, value)
self.update()
def update(self):
for opt in self._DEFAULT.keys():
self._opts[opt] = weechat.config_get_plugin(opt)
def __getitem__(self, key):
return self._opts[key]
def config_cb(data, option, value):
cfg.update()
return weechat.WEECHAT_RC_OK
def send_notify(**kwargs):
data = json.dumps(kwargs)
req = urllib2.Request(cfg['url'], data, {'Content-Type':
'application/json'})
f = urllib2.urlopen(req)
response = f.read()
f.close()
def notify(subtitle, message):
opt = {}
if cfg['activate_label']:
opt['activate'] = cfg['activate_label']
if cfg['sound']:
opt['sound'] = cfg['sound']
send_notify(title=cfg['title'], subtitle=subtitle, message=message, **opt)
def handle_msg(data, pbuffer, date, tags, displayed, highlight, prefix, message
):
highlight = bool(highlight)
buffer_type = weechat.buffer_get_string(pbuffer, 'localvar_type')
buffer_name = weechat.buffer_get_string(pbuffer, 'short_name')
away = weechat.buffer_get_string(pbuffer, 'localvar_away')
if buffer_type == 'private':
notify('Private message from {}'.format(buffer_name), message)
elif buffer_type == 'channel' and highlight:
notify('Highlight {}@{}'.format(prefix, buffer_name), message)
return weechat.WEECHAT_RC_OK
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
import weechat
except:
print('This script must be run under WeeChat.')
print('Get WeeChat now at: http://www.weechat.org/')
import_ok = False
try:
import json, urllib2
except ImportError as message:
print('Missing package(s) for %s: %s' % (SCRIPT_NAME, message))
import_ok = False
<|reserved_special_token_0|>
class Config(object):
_DEFAULT = {'url': 'http://localhost:9999/notify', 'title':
'IRC Notification', 'activate_label': '', 'sound': ''}
def __init__(self):
self._opts = {}
for opt, value in self._DEFAULT.items():
if not weechat.config_is_set_plugin(opt):
weechat.config_set_plugin(opt, value)
self.update()
def update(self):
for opt in self._DEFAULT.keys():
self._opts[opt] = weechat.config_get_plugin(opt)
def __getitem__(self, key):
return self._opts[key]
def config_cb(data, option, value):
cfg.update()
return weechat.WEECHAT_RC_OK
def send_notify(**kwargs):
data = json.dumps(kwargs)
req = urllib2.Request(cfg['url'], data, {'Content-Type':
'application/json'})
f = urllib2.urlopen(req)
response = f.read()
f.close()
def notify(subtitle, message):
opt = {}
if cfg['activate_label']:
opt['activate'] = cfg['activate_label']
if cfg['sound']:
opt['sound'] = cfg['sound']
send_notify(title=cfg['title'], subtitle=subtitle, message=message, **opt)
def handle_msg(data, pbuffer, date, tags, displayed, highlight, prefix, message
):
highlight = bool(highlight)
buffer_type = weechat.buffer_get_string(pbuffer, 'localvar_type')
buffer_name = weechat.buffer_get_string(pbuffer, 'short_name')
away = weechat.buffer_get_string(pbuffer, 'localvar_away')
if buffer_type == 'private':
notify('Private message from {}'.format(buffer_name), message)
elif buffer_type == 'channel' and highlight:
notify('Highlight {}@{}'.format(prefix, buffer_name), message)
return weechat.WEECHAT_RC_OK
if __name__ == '__main__' and import_ok:
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
cfg = Config()
weechat.hook_config('plugins.var.python.' + SCRIPT_NAME + '.*',
'config_cb', '')
weechat.hook_print('', '', '', 1, 'handle_msg', '')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Brandon Bennett <[email protected]>
#
# Send a notification via notifyserver (https://github.com/nemith/notifyserver)
# on highlight/private message or new DCC.
#
# History:
#
# 2015-02-07, Brandon Bennett <[email protected]>:
# version 0.1: initial release
#
SCRIPT_NAME = 'notifyserver'
SCRIPT_AUTHOR = 'Brandon Bennett <[email protected]>'
SCRIPT_VERSION = '0.1'
SCRIPT_LICENSE = 'MIT'
SCRIPT_DESC = 'Send a notification to a notifyserver on highlight/private message or new DCC'
import_ok = True
try:
import weechat
except:
print('This script must be run under WeeChat.')
print('Get WeeChat now at: http://www.weechat.org/')
import_ok = False
try:
import json, urllib2
except ImportError as message:
print('Missing package(s) for %s: %s' % (SCRIPT_NAME, message))
import_ok = False
cfg = None
class Config(object):
_DEFAULT = {
'url' : 'http://localhost:9999/notify',
'title': 'IRC Notification',
'activate_label': "",
'sound': "",
}
def __init__(self):
self._opts = {}
for opt, value in self._DEFAULT.items():
if not weechat.config_is_set_plugin(opt):
weechat.config_set_plugin(opt, value)
self.update()
def update(self):
for opt in self._DEFAULT.keys():
self._opts[opt] = weechat.config_get_plugin(opt)
def __getitem__(self, key):
return self._opts[key]
def config_cb(data, option, value):
cfg.update()
return weechat.WEECHAT_RC_OK
def send_notify(**kwargs):
data = json.dumps(kwargs)
req = urllib2.Request(cfg['url'], data, {'Content-Type': 'application/json'})
f = urllib2.urlopen(req)
response = f.read()
f.close()
def notify(subtitle, message):
opt = {}
if cfg['activate_label']:
opt['activate'] = cfg['activate_label']
if cfg['sound']:
opt['sound'] = cfg['sound']
send_notify(
title=cfg['title'],
subtitle=subtitle,
message=message,
**opt)
def handle_msg(data, pbuffer, date, tags, displayed, highlight, prefix, message):
highlight = bool(highlight)
buffer_type = weechat.buffer_get_string(pbuffer, "localvar_type")
buffer_name = weechat.buffer_get_string(pbuffer, "short_name")
away = weechat.buffer_get_string(pbuffer, "localvar_away")
if buffer_type == 'private':
notify("Private message from {}".format(buffer_name), message)
elif buffer_type == 'channel' and highlight:
notify("Highlight {}@{}".format(prefix, buffer_name), message)
return weechat.WEECHAT_RC_OK
if __name__ == '__main__' and import_ok:
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
cfg = Config()
weechat.hook_config("plugins.var.python." + SCRIPT_NAME + ".*", "config_cb", "")
weechat.hook_print("", "", "", 1, "handle_msg", "")
|
flexible
|
{
"blob_id": "0ae9ad7af26e3d19f2d3967c02611503c32aea70",
"index": 2593,
"step-1": "<mask token>\n\n\nclass Config(object):\n _DEFAULT = {'url': 'http://localhost:9999/notify', 'title':\n 'IRC Notification', 'activate_label': '', 'sound': ''}\n\n def __init__(self):\n self._opts = {}\n for opt, value in self._DEFAULT.items():\n if not weechat.config_is_set_plugin(opt):\n weechat.config_set_plugin(opt, value)\n self.update()\n\n def update(self):\n for opt in self._DEFAULT.keys():\n self._opts[opt] = weechat.config_get_plugin(opt)\n\n def __getitem__(self, key):\n return self._opts[key]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Config(object):\n _DEFAULT = {'url': 'http://localhost:9999/notify', 'title':\n 'IRC Notification', 'activate_label': '', 'sound': ''}\n\n def __init__(self):\n self._opts = {}\n for opt, value in self._DEFAULT.items():\n if not weechat.config_is_set_plugin(opt):\n weechat.config_set_plugin(opt, value)\n self.update()\n\n def update(self):\n for opt in self._DEFAULT.keys():\n self._opts[opt] = weechat.config_get_plugin(opt)\n\n def __getitem__(self, key):\n return self._opts[key]\n\n\ndef config_cb(data, option, value):\n cfg.update()\n return weechat.WEECHAT_RC_OK\n\n\ndef send_notify(**kwargs):\n data = json.dumps(kwargs)\n req = urllib2.Request(cfg['url'], data, {'Content-Type':\n 'application/json'})\n f = urllib2.urlopen(req)\n response = f.read()\n f.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Config(object):\n _DEFAULT = {'url': 'http://localhost:9999/notify', 'title':\n 'IRC Notification', 'activate_label': '', 'sound': ''}\n\n def __init__(self):\n self._opts = {}\n for opt, value in self._DEFAULT.items():\n if not weechat.config_is_set_plugin(opt):\n weechat.config_set_plugin(opt, value)\n self.update()\n\n def update(self):\n for opt in self._DEFAULT.keys():\n self._opts[opt] = weechat.config_get_plugin(opt)\n\n def __getitem__(self, key):\n return self._opts[key]\n\n\ndef config_cb(data, option, value):\n cfg.update()\n return weechat.WEECHAT_RC_OK\n\n\ndef send_notify(**kwargs):\n data = json.dumps(kwargs)\n req = urllib2.Request(cfg['url'], data, {'Content-Type':\n 'application/json'})\n f = urllib2.urlopen(req)\n response = f.read()\n f.close()\n\n\ndef notify(subtitle, message):\n opt = {}\n if cfg['activate_label']:\n opt['activate'] = cfg['activate_label']\n if cfg['sound']:\n opt['sound'] = cfg['sound']\n send_notify(title=cfg['title'], subtitle=subtitle, message=message, **opt)\n\n\ndef handle_msg(data, pbuffer, date, tags, displayed, highlight, prefix, message\n ):\n highlight = bool(highlight)\n buffer_type = weechat.buffer_get_string(pbuffer, 'localvar_type')\n buffer_name = weechat.buffer_get_string(pbuffer, 'short_name')\n away = weechat.buffer_get_string(pbuffer, 'localvar_away')\n if buffer_type == 'private':\n notify('Private message from {}'.format(buffer_name), message)\n elif buffer_type == 'channel' and highlight:\n notify('Highlight {}@{}'.format(prefix, buffer_name), message)\n return weechat.WEECHAT_RC_OK\n\n\n<mask token>\n",
"step-4": "<mask token>\ntry:\n import weechat\nexcept:\n print('This script must be run under WeeChat.')\n print('Get WeeChat now at: http://www.weechat.org/')\n import_ok = False\ntry:\n import json, urllib2\nexcept ImportError as message:\n print('Missing package(s) for %s: %s' % (SCRIPT_NAME, message))\n import_ok = False\n<mask token>\n\n\nclass Config(object):\n _DEFAULT = {'url': 'http://localhost:9999/notify', 'title':\n 'IRC Notification', 'activate_label': '', 'sound': ''}\n\n def __init__(self):\n self._opts = {}\n for opt, value in self._DEFAULT.items():\n if not weechat.config_is_set_plugin(opt):\n weechat.config_set_plugin(opt, value)\n self.update()\n\n def update(self):\n for opt in self._DEFAULT.keys():\n self._opts[opt] = weechat.config_get_plugin(opt)\n\n def __getitem__(self, key):\n return self._opts[key]\n\n\ndef config_cb(data, option, value):\n cfg.update()\n return weechat.WEECHAT_RC_OK\n\n\ndef send_notify(**kwargs):\n data = json.dumps(kwargs)\n req = urllib2.Request(cfg['url'], data, {'Content-Type':\n 'application/json'})\n f = urllib2.urlopen(req)\n response = f.read()\n f.close()\n\n\ndef notify(subtitle, message):\n opt = {}\n if cfg['activate_label']:\n opt['activate'] = cfg['activate_label']\n if cfg['sound']:\n opt['sound'] = cfg['sound']\n send_notify(title=cfg['title'], subtitle=subtitle, message=message, **opt)\n\n\ndef handle_msg(data, pbuffer, date, tags, displayed, highlight, prefix, message\n ):\n highlight = bool(highlight)\n buffer_type = weechat.buffer_get_string(pbuffer, 'localvar_type')\n buffer_name = weechat.buffer_get_string(pbuffer, 'short_name')\n away = weechat.buffer_get_string(pbuffer, 'localvar_away')\n if buffer_type == 'private':\n notify('Private message from {}'.format(buffer_name), message)\n elif buffer_type == 'channel' and highlight:\n notify('Highlight {}@{}'.format(prefix, buffer_name), message)\n return weechat.WEECHAT_RC_OK\n\n\nif __name__ == '__main__' and import_ok:\n if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,\n SCRIPT_LICENSE, SCRIPT_DESC, '', ''):\n cfg = Config()\n weechat.hook_config('plugins.var.python.' + SCRIPT_NAME + '.*',\n 'config_cb', '')\n weechat.hook_print('', '', '', 1, 'handle_msg', '')\n",
"step-5": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2015 Brandon Bennett <[email protected]>\n#\n# Send a notification via notifyserver (https://github.com/nemith/notifyserver) \n# on highlight/private message or new DCC.\n#\n# History:\n#\n# 2015-02-07, Brandon Bennett <[email protected]>:\n# version 0.1: initial release\n#\n\nSCRIPT_NAME = 'notifyserver'\nSCRIPT_AUTHOR = 'Brandon Bennett <[email protected]>'\nSCRIPT_VERSION = '0.1'\nSCRIPT_LICENSE = 'MIT'\nSCRIPT_DESC = 'Send a notification to a notifyserver on highlight/private message or new DCC'\n\nimport_ok = True\n\ntry:\n import weechat\nexcept:\n print('This script must be run under WeeChat.')\n print('Get WeeChat now at: http://www.weechat.org/')\n import_ok = False\n\ntry:\n import json, urllib2\nexcept ImportError as message:\n print('Missing package(s) for %s: %s' % (SCRIPT_NAME, message))\n import_ok = False\n\n\ncfg = None\n\n\nclass Config(object):\n _DEFAULT = {\n 'url' : 'http://localhost:9999/notify',\n 'title': 'IRC Notification',\n 'activate_label': \"\",\n 'sound': \"\",\n }\n\n def __init__(self):\n self._opts = {}\n for opt, value in self._DEFAULT.items():\n if not weechat.config_is_set_plugin(opt):\n weechat.config_set_plugin(opt, value) \n self.update()\n\n def update(self):\n for opt in self._DEFAULT.keys():\n self._opts[opt] = weechat.config_get_plugin(opt)\n\n def __getitem__(self, key):\n return self._opts[key]\n\ndef config_cb(data, option, value):\n cfg.update()\n return weechat.WEECHAT_RC_OK\n\ndef send_notify(**kwargs):\n data = json.dumps(kwargs)\n req = urllib2.Request(cfg['url'], data, {'Content-Type': 'application/json'})\n f = urllib2.urlopen(req)\n response = f.read()\n f.close()\n\ndef notify(subtitle, message):\n opt = {}\n if cfg['activate_label']:\n opt['activate'] = cfg['activate_label']\n if cfg['sound']:\n opt['sound'] = cfg['sound']\n\n send_notify(\n title=cfg['title'],\n subtitle=subtitle,\n message=message,\n **opt)\n\ndef handle_msg(data, pbuffer, date, tags, displayed, highlight, prefix, message):\n highlight = bool(highlight)\n buffer_type = weechat.buffer_get_string(pbuffer, \"localvar_type\")\n buffer_name = weechat.buffer_get_string(pbuffer, \"short_name\")\n away = weechat.buffer_get_string(pbuffer, \"localvar_away\")\n\n if buffer_type == 'private':\n notify(\"Private message from {}\".format(buffer_name), message)\n elif buffer_type == 'channel' and highlight:\n notify(\"Highlight {}@{}\".format(prefix, buffer_name), message)\n\n return weechat.WEECHAT_RC_OK\n\nif __name__ == '__main__' and import_ok:\n if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,\n SCRIPT_LICENSE, SCRIPT_DESC, '', ''):\n cfg = Config()\n \n weechat.hook_config(\"plugins.var.python.\" + SCRIPT_NAME + \".*\", \"config_cb\", \"\")\n weechat.hook_print(\"\", \"\", \"\", 1, \"handle_msg\", \"\")\n",
"step-ids": [
5,
7,
9,
10,
12
]
}
|
[
5,
7,
9,
10,
12
] |
from django.contrib import admin
from .models import Sport
from .models import Action
admin.site.register(Sport)
admin.site.register(Action)
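
# A minimal sketch of the decorator-based alternative (assumption: Sport has a
# "name" field; adjust list_display to the model's real fields):
#
#   @admin.register(Sport)
#   class SportAdmin(admin.ModelAdmin):
#       list_display = ("name",)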
|
normal
|
{
"blob_id": "ab38371ee3941e214344497b7e56786908a9b3d1",
"index": 2236,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Sport)\nadmin.site.register(Action)\n",
"step-3": "from django.contrib import admin\nfrom .models import Sport\nfrom .models import Action\nadmin.site.register(Sport)\nadmin.site.register(Action)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(1, 5):
with open('Desktop/' + str(i) + '.log', 'r') as r:
with open('Desktop/' + str(i) + '-clean.log', 'a+') as w:
for line in r:
if not any(s in line for s in no_list):
w.write(line)
<|reserved_special_token_1|>
no_list = {'tor:', 'getblocktemplate', ' ping ', ' pong '}
for i in range(1, 5):
with open('Desktop/' + str(i) + '.log', 'r') as r:
with open('Desktop/' + str(i) + '-clean.log', 'a+') as w:
for line in r:
if not any(s in line for s in no_list):
w.write(line)
<|reserved_special_token_1|>
no_list = {"tor:", "getblocktemplate", " ping ", " pong "}
for i in range(1, 5):
with open("Desktop/"+str(i)+".log", "r") as r:
with open("Desktop/"+str(i)+"-clean.log", "a+") as w:
for line in r:
if not any(s in line for s in no_list):
w.write(line)
|
flexible
|
{
"blob_id": "f14a8d0d51f0baefe20b2699ffa82112dad9c38f",
"index": 6582,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, 5):\n with open('Desktop/' + str(i) + '.log', 'r') as r:\n with open('Desktop/' + str(i) + '-clean.log', 'a+') as w:\n for line in r:\n if not any(s in line for s in no_list):\n w.write(line)\n",
"step-3": "no_list = {'tor:', 'getblocktemplate', ' ping ', ' pong '}\nfor i in range(1, 5):\n with open('Desktop/' + str(i) + '.log', 'r') as r:\n with open('Desktop/' + str(i) + '-clean.log', 'a+') as w:\n for line in r:\n if not any(s in line for s in no_list):\n w.write(line)\n",
"step-4": "no_list = {\"tor:\", \"getblocktemplate\", \" ping \", \" pong \"}\nfor i in range(1, 5):\n\twith open(\"Desktop/\"+str(i)+\".log\", \"r\") as r:\n\t\twith open(\"Desktop/\"+str(i)+\"-clean.log\", \"a+\") as w:\n\t\t\tfor line in r:\n\t\t\t\tif not any(s in line for s in no_list):\n\t\t\t\t\tw.write(line)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
{'name': 'EDC Analytic Entry', 'depends': ['stock_account',
'purchase_stock', 'account_accountant'], 'description': '\n ',
'author': 'Ejaftech', 'data': ['views/account_move_view.xml']}
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
{
'name': 'EDC Analytic Entry',
'depends': [
'stock_account',
'purchase_stock',
'account_accountant',
],
"description": """
""",
'author': "Ejaftech",
'data': [
'views/account_move_view.xml',
],
}
|
flexible
|
{
"blob_id": "797e7c1b3e8b41a167bfbedfb6a9449e6426ba22",
"index": 8570,
"step-1": "<mask token>\n",
"step-2": "{'name': 'EDC Analytic Entry', 'depends': ['stock_account',\n 'purchase_stock', 'account_accountant'], 'description': '\\n ',\n 'author': 'Ejaftech', 'data': ['views/account_move_view.xml']}\n",
"step-3": "# -*- coding: utf-8 -*-\n{\n 'name': 'EDC Analytic Entry',\n 'depends': [\n 'stock_account',\n 'purchase_stock',\n 'account_accountant',\n\n ],\n \"description\": \"\"\"\n \"\"\",\n 'author': \"Ejaftech\",\n\n 'data': [\n 'views/account_move_view.xml',\n ],\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Generated by Django 2.0.3 on 2018-03-24 07:53
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('printers', '0001_initial'),
('devices', '0002_url'),
]
operations = [
migrations.CreateModel(
name='Cartridge',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512)),
('image', models.ImageField(default='cartridge.png', storage=django.core.files.storage.FileSystemStorage(location='./media/images/spares'), upload_to='')),
('in_stock', models.IntegerField()),
('comment', models.CharField(max_length=512)),
('contractors', models.ManyToManyField(to='devices.Contractor')),
('printers', models.ManyToManyField(to='printers.Printer')),
('urls', models.ManyToManyField(to='devices.Url')),
],
),
migrations.CreateModel(
name='Index',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('index', models.CharField(max_length=512, unique=True)),
],
),
migrations.CreateModel(
name='Spare',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512)),
('image', models.ImageField(default='spare.png', storage=django.core.files.storage.FileSystemStorage(location='./media/images/spares'), upload_to='')),
('in_stock', models.IntegerField()),
('comment', models.CharField(max_length=512)),
('contractors', models.ManyToManyField(to='devices.Contractor')),
('indexes', models.ManyToManyField(to='printer_spares.Index')),
('printers', models.ManyToManyField(to='printers.Printer')),
('urls', models.ManyToManyField(to='devices.Url')),
],
),
]
|
normal
|
{
"blob_id": "d8df9a9f95a1d4a9aa34987ec1244cc6c0c7c610",
"index": 8048,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('printers', '0001_initial'), ('devices', '0002_url')]\n operations = [migrations.CreateModel(name='Cartridge', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 512)), ('image', models.ImageField(default='cartridge.png', storage\n =django.core.files.storage.FileSystemStorage(location=\n './media/images/spares'), upload_to='')), ('in_stock', models.\n IntegerField()), ('comment', models.CharField(max_length=512)), (\n 'contractors', models.ManyToManyField(to='devices.Contractor')), (\n 'printers', models.ManyToManyField(to='printers.Printer')), ('urls',\n models.ManyToManyField(to='devices.Url'))]), migrations.CreateModel\n (name='Index', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('index',\n models.CharField(max_length=512, unique=True))]), migrations.\n CreateModel(name='Spare', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('name', models.CharField(max_length=512)), ('image',\n models.ImageField(default='spare.png', storage=django.core.files.\n storage.FileSystemStorage(location='./media/images/spares'),\n upload_to='')), ('in_stock', models.IntegerField()), ('comment',\n models.CharField(max_length=512)), ('contractors', models.\n ManyToManyField(to='devices.Contractor')), ('indexes', models.\n ManyToManyField(to='printer_spares.Index')), ('printers', models.\n ManyToManyField(to='printers.Printer')), ('urls', models.\n ManyToManyField(to='devices.Url'))])]\n",
"step-4": "import django.core.files.storage\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('printers', '0001_initial'), ('devices', '0002_url')]\n operations = [migrations.CreateModel(name='Cartridge', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 512)), ('image', models.ImageField(default='cartridge.png', storage\n =django.core.files.storage.FileSystemStorage(location=\n './media/images/spares'), upload_to='')), ('in_stock', models.\n IntegerField()), ('comment', models.CharField(max_length=512)), (\n 'contractors', models.ManyToManyField(to='devices.Contractor')), (\n 'printers', models.ManyToManyField(to='printers.Printer')), ('urls',\n models.ManyToManyField(to='devices.Url'))]), migrations.CreateModel\n (name='Index', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('index',\n models.CharField(max_length=512, unique=True))]), migrations.\n CreateModel(name='Spare', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('name', models.CharField(max_length=512)), ('image',\n models.ImageField(default='spare.png', storage=django.core.files.\n storage.FileSystemStorage(location='./media/images/spares'),\n upload_to='')), ('in_stock', models.IntegerField()), ('comment',\n models.CharField(max_length=512)), ('contractors', models.\n ManyToManyField(to='devices.Contractor')), ('indexes', models.\n ManyToManyField(to='printer_spares.Index')), ('printers', models.\n ManyToManyField(to='printers.Printer')), ('urls', models.\n ManyToManyField(to='devices.Url'))])]\n",
"step-5": "# Generated by Django 2.0.3 on 2018-03-24 07:53\n\nimport django.core.files.storage\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('printers', '0001_initial'),\n ('devices', '0002_url'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Cartridge',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=512)),\n ('image', models.ImageField(default='cartridge.png', storage=django.core.files.storage.FileSystemStorage(location='./media/images/spares'), upload_to='')),\n ('in_stock', models.IntegerField()),\n ('comment', models.CharField(max_length=512)),\n ('contractors', models.ManyToManyField(to='devices.Contractor')),\n ('printers', models.ManyToManyField(to='printers.Printer')),\n ('urls', models.ManyToManyField(to='devices.Url')),\n ],\n ),\n migrations.CreateModel(\n name='Index',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('index', models.CharField(max_length=512, unique=True)),\n ],\n ),\n migrations.CreateModel(\n name='Spare',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=512)),\n ('image', models.ImageField(default='spare.png', storage=django.core.files.storage.FileSystemStorage(location='./media/images/spares'), upload_to='')),\n ('in_stock', models.IntegerField()),\n ('comment', models.CharField(max_length=512)),\n ('contractors', models.ManyToManyField(to='devices.Contractor')),\n ('indexes', models.ManyToManyField(to='printer_spares.Index')),\n ('printers', models.ManyToManyField(to='printers.Printer')),\n ('urls', models.ManyToManyField(to='devices.Url')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def merge(A, B):
C, m, n = [], len(A), len(B)
i, j = 0, 0
while i + j < m + n:
if i == m:
C.append(B[j])
j = j + 1
elif j == n:
C.append(A[i])
i = i + 1
elif A[i] < B[j]:
C.append(A[i])
i = i + 1
elif A[i] > B[j]:
C.append(B[j])
j = j + 1
        else:
            # equal heads: take both; with a bare "pass" neither index
            # advances and the loop never terminates on duplicate values
            C.append(A[i])
            C.append(B[j])
            i = i + 1
            j = j + 1
return C
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def merge(A, B):
C, m, n = [], len(A), len(B)
i, j = 0, 0
while i + j < m + n:
if i == m:
C.append(B[j])
j = j + 1
elif j == n:
C.append(A[i])
i = i + 1
elif A[i] < B[j]:
C.append(A[i])
i = i + 1
elif A[i] > B[j]:
C.append(B[j])
j = j + 1
        else:
            # equal heads: take both; with a bare "pass" neither index
            # advances and the loop never terminates on duplicate values
            C.append(A[i])
            C.append(B[j])
            i = i + 1
            j = j + 1
return C
def mergeSort(A, left, right):
if right - left <= 1:
return A[left:right]
if right - left > 1:
mid = (left + right) // 2
L = mergeSort(A, left, mid)
R = mergeSort(A, mid, right)
return merge(L, R)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def merge(A, B):
C, m, n = [], len(A), len(B)
i, j = 0, 0
while i + j < m + n:
if i == m:
C.append(B[j])
j = j + 1
elif j == n:
C.append(A[i])
i = i + 1
elif A[i] < B[j]:
C.append(A[i])
i = i + 1
elif A[i] > B[j]:
C.append(B[j])
j = j + 1
        else:
            # equal heads: take both; with a bare "pass" neither index
            # advances and the loop never terminates on duplicate values
            C.append(A[i])
            C.append(B[j])
            i = i + 1
            j = j + 1
return C
def mergeSort(A, left, right):
if right - left <= 1:
return A[left:right]
if right - left > 1:
mid = (left + right) // 2
L = mergeSort(A, left, mid)
R = mergeSort(A, mid, right)
return merge(L, R)
a = range(1, 100, 2) + range(0, 100, 2)
<|reserved_special_token_1|>
from __future__ import division
def merge(A, B):
C, m, n = [], len(A), len(B)
i, j = 0, 0
while i + j < m + n:
if i == m:
C.append(B[j])
j = j + 1
elif j == n:
C.append(A[i])
i = i + 1
elif A[i] < B[j]:
C.append(A[i])
i = i + 1
elif A[i] > B[j]:
C.append(B[j])
j = j + 1
        else:
            # equal heads: take both; with a bare "pass" neither index
            # advances and the loop never terminates on duplicate values
            C.append(A[i])
            C.append(B[j])
            i = i + 1
            j = j + 1
return C
def mergeSort(A, left, right):
if right - left <= 1:
return A[left:right]
if right - left > 1:
mid = (left + right) // 2
L = mergeSort(A, left, mid)
R = mergeSort(A, mid, right)
return merge(L, R)
a = range(1, 100, 2) + range(0, 100, 2)
<|reserved_special_token_1|>
#Merge Sort
#O(nlogn)
#Merge Part
from __future__ import division #use for python2
def merge(A, B): #Merge A[0:m], B[0,n]
(C, m, n) = ([], len(A), len(B))
(i, j) = (0, 0) #Current positions in A, B
while (i + j) < (m + n): #i+j is no. of elements merged so far
if i == m: #case 1: A is empty
C.append(B[j])
j = j+1
elif j == n: #case 2: B is empty
C.append(A[i])
i = i+1
elif A[i] < B[j]: #case 3: Head of A is smaller
C.append(A[i])
i = i+1
elif A[i] > B[j]: #case 4: Head of B is smaller
C.append(B[j])
j = j+1
        else:               #case 5: heads are equal; take both
            C.append(A[i])  #(with "pass" neither index advances and the
            C.append(B[j])  # loop would never terminate on duplicates)
            i = i+1
            j = j+1
return C
#A = range(0, 100, 2) # generate the lists
#B = range(1, 75, 2) # generate the lists
#print merge(A, B)
#print "\n"
#print len(A) + len(B)
#Sort Part
def mergeSort(A, left, right):
#Sort the slice A[left:right]
if (right - left) <= 1: #Base Case
return A[left:right]
if (right - left) > 1: #Recursive call
mid = (left + right)//2
L = mergeSort(A, left, mid)
R = mergeSort(A, mid, right)
return (merge(L,R))
a = range(1, 100, 2) + range(0, 100, 2)
#print a
#print mergeSort(a, 0, len(a))
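
#Quick check of the duplicate handling added above (values chosen arbitrarily):
#with the equal-heads branch, merge([1, 2, 2, 5], [2, 3]) returns
#[1, 2, 2, 2, 3, 5]; with the old "pass" it would loop forever once both
#heads equal 2.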
|
flexible
|
{
"blob_id": "7b4c2689ad1d4601a108dd8aa6e3c4d1e9730dc5",
"index": 5257,
"step-1": "<mask token>\n\n\ndef merge(A, B):\n C, m, n = [], len(A), len(B)\n i, j = 0, 0\n while i + j < m + n:\n if i == m:\n C.append(B[j])\n j = j + 1\n elif j == n:\n C.append(A[i])\n i = i + 1\n elif A[i] < B[j]:\n C.append(A[i])\n i = i + 1\n elif A[i] > B[j]:\n C.append(B[j])\n j = j + 1\n else:\n pass\n return C\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef merge(A, B):\n C, m, n = [], len(A), len(B)\n i, j = 0, 0\n while i + j < m + n:\n if i == m:\n C.append(B[j])\n j = j + 1\n elif j == n:\n C.append(A[i])\n i = i + 1\n elif A[i] < B[j]:\n C.append(A[i])\n i = i + 1\n elif A[i] > B[j]:\n C.append(B[j])\n j = j + 1\n else:\n pass\n return C\n\n\ndef mergeSort(A, left, right):\n if right - left <= 1:\n return A[left:right]\n if right - left > 1:\n mid = (left + right) // 2\n L = mergeSort(A, left, mid)\n R = mergeSort(A, mid, right)\n return merge(L, R)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef merge(A, B):\n C, m, n = [], len(A), len(B)\n i, j = 0, 0\n while i + j < m + n:\n if i == m:\n C.append(B[j])\n j = j + 1\n elif j == n:\n C.append(A[i])\n i = i + 1\n elif A[i] < B[j]:\n C.append(A[i])\n i = i + 1\n elif A[i] > B[j]:\n C.append(B[j])\n j = j + 1\n else:\n pass\n return C\n\n\ndef mergeSort(A, left, right):\n if right - left <= 1:\n return A[left:right]\n if right - left > 1:\n mid = (left + right) // 2\n L = mergeSort(A, left, mid)\n R = mergeSort(A, mid, right)\n return merge(L, R)\n\n\na = range(1, 100, 2) + range(0, 100, 2)\n",
"step-4": "from __future__ import division\n\n\ndef merge(A, B):\n C, m, n = [], len(A), len(B)\n i, j = 0, 0\n while i + j < m + n:\n if i == m:\n C.append(B[j])\n j = j + 1\n elif j == n:\n C.append(A[i])\n i = i + 1\n elif A[i] < B[j]:\n C.append(A[i])\n i = i + 1\n elif A[i] > B[j]:\n C.append(B[j])\n j = j + 1\n else:\n pass\n return C\n\n\ndef mergeSort(A, left, right):\n if right - left <= 1:\n return A[left:right]\n if right - left > 1:\n mid = (left + right) // 2\n L = mergeSort(A, left, mid)\n R = mergeSort(A, mid, right)\n return merge(L, R)\n\n\na = range(1, 100, 2) + range(0, 100, 2)\n",
"step-5": "\n#Merge Sort\n#O(nlogn)\n\n#Merge Part\n\nfrom __future__ import division #use for python2\n\ndef merge(A, B): #Merge A[0:m], B[0,n]\n (C, m, n) = ([], len(A), len(B))\n (i, j) = (0, 0) #Current positions in A, B\n\n while (i + j) < (m + n): #i+j is no. of elements merged so far\n if i == m: #case 1: A is empty\n C.append(B[j])\n j = j+1\n elif j == n: #case 2: B is empty\n C.append(A[i])\n i = i+1\n elif A[i] < B[j]: #case 3: Head of A is smaller\n C.append(A[i])\n i = i+1\n elif A[i] > B[j]: #case 4: Head of B is smaller\n C.append(B[j])\n j = j+1\n else:\n pass\n\n return C\n\n#A = range(0, 100, 2) # generate the lists\n#B = range(1, 75, 2) # generate the lists\n\n#print merge(A, B)\n#print \"\\n\"\n#print len(A) + len(B)\n\n#Sort Part\n\ndef mergeSort(A, left, right):\n #Sort the slice A[left:right]\n\n if (right - left) <= 1: #Base Case\n return A[left:right]\n\n if (right - left) > 1: #Recursive call\n mid = (left + right)//2\n\n L = mergeSort(A, left, mid)\n R = mergeSort(A, mid, right)\n\n return (merge(L,R))\n\na = range(1, 100, 2) + range(0, 100, 2)\n\n#print a\n\n#print mergeSort(a, 0, len(a))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from numpy import exp, array, dot
from read import normalized as normalized_set  # aliased: the code below refers to "normalized_set"
class NeuralNetwork():
def __init__(self, layer1, layer2):
self.layer1 = layer1
self.layer2 = layer2
def __sigmoid(self, x):
return 1 / (1 + exp(-x))
def __sigmoid_derivative(self, x):
return x * (1 - x)
def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
for iteration in range(number_of_training_iterations):
output_from_layer_1, output_from_layer_2 = self.think(training_set_inputs)
layer2_error = training_set_outputs - output_from_layer_2
layer2_delta = layer2_error * self.__sigmoid_derivative(output_from_layer_2)
layer1_error = layer2_delta.dot(self.layer2.T)
layer1_delta = layer1_error * self.__sigmoid_derivative(output_from_layer_1)
layer1_adjustment = training_set_inputs.T.dot(layer1_delta)
layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)
self.layer1 += layer1_adjustment
self.layer2 += layer2_adjustment
def think(self, inputs):
output_from_layer1 = self.__sigmoid(dot(inputs, self.layer1))
output_from_layer2 = self.__sigmoid(dot(output_from_layer1, self.layer2))
return output_from_layer1, output_from_layer2
def print_weights(self):
print(self.layer1)
print(self.layer2)
if __name__ == "__main__":
layer1 = array([[0.2, 0.1], [0.3, 0.1], [0.2, 0.1]])
layer2 = array([[0.5, 0.1]]).T
neural_network = NeuralNetwork(layer1, layer2)
neural_network.print_weights()
training_set_inputs = array(
[
[normalized_set['input1'][0], normalized_set['input2'][0], normalized_set['input3'][0]],
[normalized_set['input1'][1], normalized_set['input2'][1], normalized_set['input3'][1]],
[normalized_set['input1'][2], normalized_set['input2'][2], normalized_set['input3'][2]],
[normalized_set['input1'][3], normalized_set['input2'][3], normalized_set['input3'][3]],
[normalized_set['input1'][4], normalized_set['input2'][4], normalized_set['input3'][4]],
[normalized_set['input1'][5], normalized_set['input2'][5], normalized_set['input3'][5]]
])
training_set_outputs = array(
[[
normalized_set['output'][0],
normalized_set['output'][1],
normalized_set['output'][2],
normalized_set['output'][3],
normalized_set['output'][4],
normalized_set['output'][5]
]]).T
print("Inputs", training_set_inputs)
print("Output", training_set_outputs)
neural_network.train(training_set_inputs, training_set_outputs, 60000)
print("Weights ")
neural_network.print_weights()
output = neural_network.think(array([0.5, 0.6, 0.1]))
print("Weights", output[0])
print("Out ", output[1])
|
normal
|
{
"blob_id": "8109fcc136b967e0ed4ca06077b32612605d5e5f",
"index": 1136,
"step-1": "<mask token>\n\n\nclass NeuralNetwork:\n\n def __init__(self, layer1, layer2):\n self.layer1 = layer1\n self.layer2 = layer2\n <mask token>\n <mask token>\n <mask token>\n\n def think(self, inputs):\n output_from_layer1 = self.__sigmoid(dot(inputs, self.layer1))\n output_from_layer2 = self.__sigmoid(dot(output_from_layer1, self.\n layer2))\n return output_from_layer1, output_from_layer2\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass NeuralNetwork:\n\n def __init__(self, layer1, layer2):\n self.layer1 = layer1\n self.layer2 = layer2\n <mask token>\n\n def __sigmoid_derivative(self, x):\n return x * (1 - x)\n\n def train(self, training_set_inputs, training_set_outputs,\n number_of_training_iterations):\n for iteration in range(number_of_training_iterations):\n output_from_layer_1, output_from_layer_2 = self.think(\n training_set_inputs)\n layer2_error = training_set_outputs - output_from_layer_2\n layer2_delta = layer2_error * self.__sigmoid_derivative(\n output_from_layer_2)\n layer1_error = layer2_delta.dot(self.layer2.T)\n layer1_delta = layer1_error * self.__sigmoid_derivative(\n output_from_layer_1)\n layer1_adjustment = training_set_inputs.T.dot(layer1_delta)\n layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)\n self.layer1 += layer1_adjustment\n self.layer2 += layer2_adjustment\n\n def think(self, inputs):\n output_from_layer1 = self.__sigmoid(dot(inputs, self.layer1))\n output_from_layer2 = self.__sigmoid(dot(output_from_layer1, self.\n layer2))\n return output_from_layer1, output_from_layer2\n\n def print_weights(self):\n print(self.layer1)\n print(self.layer2)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass NeuralNetwork:\n\n def __init__(self, layer1, layer2):\n self.layer1 = layer1\n self.layer2 = layer2\n\n def __sigmoid(self, x):\n return 1 / (1 + exp(-x))\n\n def __sigmoid_derivative(self, x):\n return x * (1 - x)\n\n def train(self, training_set_inputs, training_set_outputs,\n number_of_training_iterations):\n for iteration in range(number_of_training_iterations):\n output_from_layer_1, output_from_layer_2 = self.think(\n training_set_inputs)\n layer2_error = training_set_outputs - output_from_layer_2\n layer2_delta = layer2_error * self.__sigmoid_derivative(\n output_from_layer_2)\n layer1_error = layer2_delta.dot(self.layer2.T)\n layer1_delta = layer1_error * self.__sigmoid_derivative(\n output_from_layer_1)\n layer1_adjustment = training_set_inputs.T.dot(layer1_delta)\n layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)\n self.layer1 += layer1_adjustment\n self.layer2 += layer2_adjustment\n\n def think(self, inputs):\n output_from_layer1 = self.__sigmoid(dot(inputs, self.layer1))\n output_from_layer2 = self.__sigmoid(dot(output_from_layer1, self.\n layer2))\n return output_from_layer1, output_from_layer2\n\n def print_weights(self):\n print(self.layer1)\n print(self.layer2)\n\n\nif __name__ == '__main__':\n layer1 = array([[0.2, 0.1], [0.3, 0.1], [0.2, 0.1]])\n layer2 = array([[0.5, 0.1]]).T\n neural_network = NeuralNetwork(layer1, layer2)\n neural_network.print_weights()\n training_set_inputs = array([[normalized_set['input1'][0],\n normalized_set['input2'][0], normalized_set['input3'][0]], [\n normalized_set['input1'][1], normalized_set['input2'][1],\n normalized_set['input3'][1]], [normalized_set['input1'][2],\n normalized_set['input2'][2], normalized_set['input3'][2]], [\n normalized_set['input1'][3], normalized_set['input2'][3],\n normalized_set['input3'][3]], [normalized_set['input1'][4],\n normalized_set['input2'][4], normalized_set['input3'][4]], [\n normalized_set['input1'][5], normalized_set['input2'][5],\n normalized_set['input3'][5]]])\n training_set_outputs = array([[normalized_set['output'][0],\n normalized_set['output'][1], normalized_set['output'][2],\n normalized_set['output'][3], normalized_set['output'][4],\n normalized_set['output'][5]]]).T\n print('Inputs', training_set_inputs)\n print('Output', training_set_outputs)\n neural_network.train(training_set_inputs, training_set_outputs, 60000)\n print('Weights ')\n neural_network.print_weights()\n output = neural_network.think(array([0.5, 0.6, 0.1]))\n print('Weights', output[0])\n print('Out ', output[1])\n",
"step-4": "from numpy import exp, array, dot\nfrom read import normalized\n\n\nclass NeuralNetwork:\n\n def __init__(self, layer1, layer2):\n self.layer1 = layer1\n self.layer2 = layer2\n\n def __sigmoid(self, x):\n return 1 / (1 + exp(-x))\n\n def __sigmoid_derivative(self, x):\n return x * (1 - x)\n\n def train(self, training_set_inputs, training_set_outputs,\n number_of_training_iterations):\n for iteration in range(number_of_training_iterations):\n output_from_layer_1, output_from_layer_2 = self.think(\n training_set_inputs)\n layer2_error = training_set_outputs - output_from_layer_2\n layer2_delta = layer2_error * self.__sigmoid_derivative(\n output_from_layer_2)\n layer1_error = layer2_delta.dot(self.layer2.T)\n layer1_delta = layer1_error * self.__sigmoid_derivative(\n output_from_layer_1)\n layer1_adjustment = training_set_inputs.T.dot(layer1_delta)\n layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)\n self.layer1 += layer1_adjustment\n self.layer2 += layer2_adjustment\n\n def think(self, inputs):\n output_from_layer1 = self.__sigmoid(dot(inputs, self.layer1))\n output_from_layer2 = self.__sigmoid(dot(output_from_layer1, self.\n layer2))\n return output_from_layer1, output_from_layer2\n\n def print_weights(self):\n print(self.layer1)\n print(self.layer2)\n\n\nif __name__ == '__main__':\n layer1 = array([[0.2, 0.1], [0.3, 0.1], [0.2, 0.1]])\n layer2 = array([[0.5, 0.1]]).T\n neural_network = NeuralNetwork(layer1, layer2)\n neural_network.print_weights()\n training_set_inputs = array([[normalized_set['input1'][0],\n normalized_set['input2'][0], normalized_set['input3'][0]], [\n normalized_set['input1'][1], normalized_set['input2'][1],\n normalized_set['input3'][1]], [normalized_set['input1'][2],\n normalized_set['input2'][2], normalized_set['input3'][2]], [\n normalized_set['input1'][3], normalized_set['input2'][3],\n normalized_set['input3'][3]], [normalized_set['input1'][4],\n normalized_set['input2'][4], normalized_set['input3'][4]], [\n normalized_set['input1'][5], normalized_set['input2'][5],\n normalized_set['input3'][5]]])\n training_set_outputs = array([[normalized_set['output'][0],\n normalized_set['output'][1], normalized_set['output'][2],\n normalized_set['output'][3], normalized_set['output'][4],\n normalized_set['output'][5]]]).T\n print('Inputs', training_set_inputs)\n print('Output', training_set_outputs)\n neural_network.train(training_set_inputs, training_set_outputs, 60000)\n print('Weights ')\n neural_network.print_weights()\n output = neural_network.think(array([0.5, 0.6, 0.1]))\n print('Weights', output[0])\n print('Out ', output[1])\n",
"step-5": "from numpy import exp, array, dot\n\nfrom read import normalized\n\nclass NeuralNetwork():\n def __init__(self, layer1, layer2):\n self.layer1 = layer1\n self.layer2 = layer2\n\n def __sigmoid(self, x):\n return 1 / (1 + exp(-x))\n\n def __sigmoid_derivative(self, x):\n return x * (1 - x)\n\n def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):\n for iteration in range(number_of_training_iterations):\n \n output_from_layer_1, output_from_layer_2 = self.think(training_set_inputs)\n\n layer2_error = training_set_outputs - output_from_layer_2\n layer2_delta = layer2_error * self.__sigmoid_derivative(output_from_layer_2)\n\n layer1_error = layer2_delta.dot(self.layer2.T)\n layer1_delta = layer1_error * self.__sigmoid_derivative(output_from_layer_1)\n\n layer1_adjustment = training_set_inputs.T.dot(layer1_delta)\n layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)\n\n self.layer1 += layer1_adjustment\n self.layer2 += layer2_adjustment\n\n\n def think(self, inputs):\n output_from_layer1 = self.__sigmoid(dot(inputs, self.layer1))\n output_from_layer2 = self.__sigmoid(dot(output_from_layer1, self.layer2))\n return output_from_layer1, output_from_layer2\n\n\n def print_weights(self):\n print(self.layer1)\n print(self.layer2)\n\n\nif __name__ == \"__main__\":\n \n layer1 = array([[0.2, 0.1], [0.3, 0.1], [0.2, 0.1]])\n\n layer2 = array([[0.5, 0.1]]).T\n\n neural_network = NeuralNetwork(layer1, layer2)\n\n neural_network.print_weights()\n\n training_set_inputs = array(\n [\n [normalized_set['input1'][0], normalized_set['input2'][0], normalized_set['input3'][0]],\n [normalized_set['input1'][1], normalized_set['input2'][1], normalized_set['input3'][1]],\n [normalized_set['input1'][2], normalized_set['input2'][2], normalized_set['input3'][2]],\n [normalized_set['input1'][3], normalized_set['input2'][3], normalized_set['input3'][3]],\n [normalized_set['input1'][4], normalized_set['input2'][4], normalized_set['input3'][4]],\n [normalized_set['input1'][5], normalized_set['input2'][5], normalized_set['input3'][5]]\n ])\n\n training_set_outputs = array(\n [[\n normalized_set['output'][0],\n normalized_set['output'][1],\n normalized_set['output'][2],\n normalized_set['output'][3],\n normalized_set['output'][4],\n normalized_set['output'][5]\n ]]).T\n\n print(\"Inputs\", training_set_inputs)\n print(\"Output\", training_set_outputs)\n\n neural_network.train(training_set_inputs, training_set_outputs, 60000)\n\n \n print(\"Weights \")\n neural_network.print_weights()\n\n \n output = neural_network.think(array([0.5, 0.6, 0.1]))\n print(\"Weights\", output[0])\n print(\"Out \", output[1])\n\n ",
"step-ids": [
3,
6,
8,
9,
10
]
}
|
[
3,
6,
8,
9,
10
] |
<|reserved_special_token_0|>
class Employee(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Empleado'
verbose_name_plural = 'Empleados'
class Register(models.Model):
owner = models.ForeignKey('Employee', verbose_name='propietario')
client = models.ForeignKey('Client', verbose_name='cliente', null=True)
description = models.CharField(max_length=255, null=True, blank=True,
verbose_name='descripción')
date = models.DateTimeField(auto_now_add=True, verbose_name='fecha')
value = models.IntegerField(verbose_name='valor')
register_type = models.PositiveSmallIntegerField(choices=
REGISTER_TYPE_CHOICES, verbose_name='servicio')
is_pay_with_card = models.BooleanField(default=False, verbose_name=
'Fue pago con tarjeta de credito')
product_name = models.CharField(max_length=255, verbose_name=
'nombre del producto')
@property
def is_entrance(self):
return self.register_type == ENTRANCE_TYPE
@property
def is_expense(self):
return self.register_type == EXPENSE_TYPE
def __str__(self):
return 'Registro número {}'.format(self.id)
class Meta:
ordering = ['date']
verbose_name = 'Registro'
verbose_name_plural = 'Registros'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Employee(models.Model):
name = models.CharField(verbose_name='nombre', max_length=255)
document = models.CharField(primary_key=True, max_length=30,
verbose_name='cédula')
phone_number = models.CharField(max_length=10, verbose_name='celular',
null=True, blank=True)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Empleado'
verbose_name_plural = 'Empleados'
class Register(models.Model):
owner = models.ForeignKey('Employee', verbose_name='propietario')
client = models.ForeignKey('Client', verbose_name='cliente', null=True)
description = models.CharField(max_length=255, null=True, blank=True,
verbose_name='descripción')
date = models.DateTimeField(auto_now_add=True, verbose_name='fecha')
value = models.IntegerField(verbose_name='valor')
register_type = models.PositiveSmallIntegerField(choices=
REGISTER_TYPE_CHOICES, verbose_name='servicio')
is_pay_with_card = models.BooleanField(default=False, verbose_name=
'Fue pago con tarjeta de credito')
product_name = models.CharField(max_length=255, verbose_name=
'nombre del producto')
@property
def is_entrance(self):
return self.register_type == ENTRANCE_TYPE
@property
def is_expense(self):
return self.register_type == EXPENSE_TYPE
def __str__(self):
return 'Registro número {}'.format(self.id)
class Meta:
ordering = ['date']
verbose_name = 'Registro'
verbose_name_plural = 'Registros'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EstheticHouse(models.Model):
name = models.CharField(verbose_name='nombre', max_length=512, unique=True)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Casa Estética'
verbose_name_plural = 'Casas Estéticas'
class Client(models.Model):
name = models.CharField(max_length=255, verbose_name='nombre')
document = models.CharField(primary_key=True, max_length=30,
verbose_name='cédula')
phone_number = models.CharField(max_length=10, verbose_name='celular')
email = models.CharField(max_length=255, verbose_name=
'correo electrónico', null=True)
birthday = models.DateField(verbose_name='Fecha de Cumpleaños', null=True)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Cliente'
verbose_name_plural = 'Clientes'
class Product(models.Model):
code = models.CharField(primary_key=True, verbose_name='código',
max_length=255, unique=True)
house = models.ForeignKey('EstheticHouse', verbose_name='casa estética',
null=True)
name = models.CharField(max_length=255, verbose_name='nombre', blank=True)
price = models.IntegerField(verbose_name='precio')
amount = models.IntegerField(verbose_name='cantidad')
@property
def is_sold_out(self):
return self.amount == 0
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Producto'
verbose_name_plural = 'Productos'
class Employee(models.Model):
name = models.CharField(verbose_name='nombre', max_length=255)
document = models.CharField(primary_key=True, max_length=30,
verbose_name='cédula')
phone_number = models.CharField(max_length=10, verbose_name='celular',
null=True, blank=True)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Empleado'
verbose_name_plural = 'Empleados'
class Register(models.Model):
owner = models.ForeignKey('Employee', verbose_name='propietario')
client = models.ForeignKey('Client', verbose_name='cliente', null=True)
description = models.CharField(max_length=255, null=True, blank=True,
verbose_name='descripción')
date = models.DateTimeField(auto_now_add=True, verbose_name='fecha')
value = models.IntegerField(verbose_name='valor')
register_type = models.PositiveSmallIntegerField(choices=
REGISTER_TYPE_CHOICES, verbose_name='servicio')
is_pay_with_card = models.BooleanField(default=False, verbose_name=
'Fue pago con tarjeta de credito')
product_name = models.CharField(max_length=255, verbose_name=
'nombre del producto')
@property
def is_entrance(self):
return self.register_type == ENTRANCE_TYPE
@property
def is_expense(self):
return self.register_type == EXPENSE_TYPE
def __str__(self):
return 'Registro número {}'.format(self.id)
class Meta:
ordering = ['date']
verbose_name = 'Registro'
verbose_name_plural = 'Registros'
<|reserved_special_token_1|>
from django.db import models
from .data import REGISTER_TYPE_CHOICES
from .data import ENTRANCE_TYPE
from .data import EXPENSE_TYPE
class EstheticHouse(models.Model):
name = models.CharField(verbose_name='nombre', max_length=512, unique=True)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Casa Estética'
verbose_name_plural = 'Casas Estéticas'
class Client(models.Model):
name = models.CharField(max_length=255, verbose_name='nombre')
document = models.CharField(primary_key=True, max_length=30,
verbose_name='cédula')
phone_number = models.CharField(max_length=10, verbose_name='celular')
email = models.CharField(max_length=255, verbose_name=
'correo electrónico', null=True)
birthday = models.DateField(verbose_name='Fecha de Cumpleaños', null=True)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Cliente'
verbose_name_plural = 'Clientes'
class Product(models.Model):
code = models.CharField(primary_key=True, verbose_name='código',
max_length=255, unique=True)
house = models.ForeignKey('EstheticHouse', verbose_name='casa estética',
null=True)
name = models.CharField(max_length=255, verbose_name='nombre', blank=True)
price = models.IntegerField(verbose_name='precio')
amount = models.IntegerField(verbose_name='cantidad')
@property
def is_sold_out(self):
return self.amount == 0
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Producto'
verbose_name_plural = 'Productos'
class Employee(models.Model):
name = models.CharField(verbose_name='nombre', max_length=255)
document = models.CharField(primary_key=True, max_length=30,
verbose_name='cédula')
phone_number = models.CharField(max_length=10, verbose_name='celular',
null=True, blank=True)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Empleado'
verbose_name_plural = 'Empleados'
class Register(models.Model):
owner = models.ForeignKey('Employee', verbose_name='propietario')
client = models.ForeignKey('Client', verbose_name='cliente', null=True)
description = models.CharField(max_length=255, null=True, blank=True,
verbose_name='descripción')
date = models.DateTimeField(auto_now_add=True, verbose_name='fecha')
value = models.IntegerField(verbose_name='valor')
register_type = models.PositiveSmallIntegerField(choices=
REGISTER_TYPE_CHOICES, verbose_name='servicio')
is_pay_with_card = models.BooleanField(default=False, verbose_name=
'Fue pago con tarjeta de credito')
product_name = models.CharField(max_length=255, verbose_name=
'nombre del producto')
@property
def is_entrance(self):
return self.register_type == ENTRANCE_TYPE
@property
def is_expense(self):
return self.register_type == EXPENSE_TYPE
def __str__(self):
return 'Registro número {}'.format(self.id)
class Meta:
ordering = ['date']
verbose_name = 'Registro'
verbose_name_plural = 'Registros'
<|reserved_special_token_1|>
from django.db import models
from .data import REGISTER_TYPE_CHOICES
from .data import ENTRANCE_TYPE
from .data import EXPENSE_TYPE
class EstheticHouse(models.Model):
name = models.CharField(
verbose_name='nombre',
max_length=512,
unique=True,
)
def __str__(self):
return self.name
class Meta:
ordering = ['name',]
verbose_name = 'Casa Estética'
verbose_name_plural = 'Casas Estéticas'
class Client(models.Model):
name = models.CharField(
max_length=255,
verbose_name='nombre',
)
document = models.CharField(
primary_key=True,
max_length=30,
verbose_name='cédula',
)
phone_number = models.CharField(
max_length=10,
verbose_name='celular'
)
email = models.CharField(
max_length=255,
verbose_name='correo electrónico',
null=True,
)
birthday = models.DateField(
verbose_name='Fecha de Cumpleaños',
null=True,
)
def __str__(self):
return self.name
class Meta:
ordering = ['name',]
verbose_name = 'Cliente'
verbose_name_plural = 'Clientes'
class Product(models.Model):
code = models.CharField(
primary_key=True,
verbose_name='código',
max_length=255,
unique=True,
)
house = models.ForeignKey(
'EstheticHouse',
verbose_name='casa estética',
null=True,
)
name = models.CharField(
max_length=255,
verbose_name='nombre',
blank=True,
)
price = models.IntegerField(
verbose_name='precio',
)
amount = models.IntegerField(
verbose_name='cantidad',
)
@property
def is_sold_out(self):
return self.amount == 0
def __str__(self):
return self.name
class Meta:
ordering = ['name',]
verbose_name = 'Producto'
verbose_name_plural = 'Productos'
class Employee(models.Model):
name = models.CharField(
verbose_name='nombre',
max_length=255,
)
document = models.CharField(
primary_key=True,
max_length=30,
verbose_name='cédula',
)
phone_number = models.CharField(
max_length=10,
verbose_name='celular',
null=True,
blank=True,
)
def __str__(self):
return self.name
class Meta:
ordering = ['name',]
verbose_name = 'Empleado'
verbose_name_plural = 'Empleados'
class Register(models.Model):
owner = models.ForeignKey(
'Employee',
verbose_name='propietario'
)
client = models.ForeignKey(
'Client',
verbose_name='cliente',
null=True,
)
description = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name='descripción'
)
date = models.DateTimeField(
auto_now_add=True,
verbose_name='fecha',
)
value = models.IntegerField(
verbose_name='valor',
)
register_type = models.PositiveSmallIntegerField(
choices=REGISTER_TYPE_CHOICES,
verbose_name='servicio',
)
is_pay_with_card = models.BooleanField(
default=False,
verbose_name='Fue pago con tarjeta de credito'
)
product_name = models.CharField(
max_length=255,
verbose_name='nombre del producto'
)
@property
def is_entrance(self):
return self.register_type == ENTRANCE_TYPE
@property
def is_expense(self):
return self.register_type == EXPENSE_TYPE
def __str__(self):
return 'Registro número {}'.format(
self.id
)
class Meta:
ordering = ['date',]
verbose_name = 'Registro'
verbose_name_plural = 'Registros'
|
flexible
|
{
"blob_id": "df25b51010fdbcbf1a8949a7a755a3a982bbf648",
"index": 6352,
"step-1": "<mask token>\n\n\nclass Employee(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = ['name']\n verbose_name = 'Empleado'\n verbose_name_plural = 'Empleados'\n\n\nclass Register(models.Model):\n owner = models.ForeignKey('Employee', verbose_name='propietario')\n client = models.ForeignKey('Client', verbose_name='cliente', null=True)\n description = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='descripción')\n date = models.DateTimeField(auto_now_add=True, verbose_name='fecha')\n value = models.IntegerField(verbose_name='valor')\n register_type = models.PositiveSmallIntegerField(choices=\n REGISTER_TYPE_CHOICES, verbose_name='servicio')\n is_pay_with_card = models.BooleanField(default=False, verbose_name=\n 'Fue pago con tarjeta de credito')\n product_name = models.CharField(max_length=255, verbose_name=\n 'nombre del producto')\n\n @property\n def is_entrance(self):\n return self.register_type == ENTRANCE_TYPE\n\n @property\n def is_expense(self):\n return self.register_type == EXPENSE_TYPE\n\n def __str__(self):\n return 'Registro número {}'.format(self.id)\n\n\n class Meta:\n ordering = ['date']\n verbose_name = 'Registro'\n verbose_name_plural = 'Registros'\n",
"step-2": "<mask token>\n\n\nclass Employee(models.Model):\n name = models.CharField(verbose_name='nombre', max_length=255)\n document = models.CharField(primary_key=True, max_length=30,\n verbose_name='cédula')\n phone_number = models.CharField(max_length=10, verbose_name='celular',\n null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = ['name']\n verbose_name = 'Empleado'\n verbose_name_plural = 'Empleados'\n\n\nclass Register(models.Model):\n owner = models.ForeignKey('Employee', verbose_name='propietario')\n client = models.ForeignKey('Client', verbose_name='cliente', null=True)\n description = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='descripción')\n date = models.DateTimeField(auto_now_add=True, verbose_name='fecha')\n value = models.IntegerField(verbose_name='valor')\n register_type = models.PositiveSmallIntegerField(choices=\n REGISTER_TYPE_CHOICES, verbose_name='servicio')\n is_pay_with_card = models.BooleanField(default=False, verbose_name=\n 'Fue pago con tarjeta de credito')\n product_name = models.CharField(max_length=255, verbose_name=\n 'nombre del producto')\n\n @property\n def is_entrance(self):\n return self.register_type == ENTRANCE_TYPE\n\n @property\n def is_expense(self):\n return self.register_type == EXPENSE_TYPE\n\n def __str__(self):\n return 'Registro número {}'.format(self.id)\n\n\n class Meta:\n ordering = ['date']\n verbose_name = 'Registro'\n verbose_name_plural = 'Registros'\n",
"step-3": "<mask token>\n\n\nclass EstheticHouse(models.Model):\n name = models.CharField(verbose_name='nombre', max_length=512, unique=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = ['name']\n verbose_name = 'Casa Estética'\n verbose_name_plural = 'Casas Estéticas'\n\n\nclass Client(models.Model):\n name = models.CharField(max_length=255, verbose_name='nombre')\n document = models.CharField(primary_key=True, max_length=30,\n verbose_name='cédula')\n phone_number = models.CharField(max_length=10, verbose_name='celular')\n email = models.CharField(max_length=255, verbose_name=\n 'correo electrónico', null=True)\n birthday = models.DateField(verbose_name='Fecha de Cumpleaños', null=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = ['name']\n verbose_name = 'Cliente'\n verbose_name_plural = 'Clientes'\n\n\nclass Product(models.Model):\n code = models.CharField(primary_key=True, verbose_name='código',\n max_length=255, unique=True)\n house = models.ForeignKey('EstheticHouse', verbose_name='casa estética',\n null=True)\n name = models.CharField(max_length=255, verbose_name='nombre', blank=True)\n price = models.IntegerField(verbose_name='precio')\n amount = models.IntegerField(verbose_name='cantidad')\n\n @property\n def is_sold_out(self):\n return self.amount == 0\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = ['name']\n verbose_name = 'Producto'\n verbose_name_plural = 'Productos'\n\n\nclass Employee(models.Model):\n name = models.CharField(verbose_name='nombre', max_length=255)\n document = models.CharField(primary_key=True, max_length=30,\n verbose_name='cédula')\n phone_number = models.CharField(max_length=10, verbose_name='celular',\n null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = ['name']\n verbose_name = 'Empleado'\n verbose_name_plural = 'Empleados'\n\n\nclass Register(models.Model):\n owner = models.ForeignKey('Employee', verbose_name='propietario')\n client = models.ForeignKey('Client', verbose_name='cliente', null=True)\n description = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='descripción')\n date = models.DateTimeField(auto_now_add=True, verbose_name='fecha')\n value = models.IntegerField(verbose_name='valor')\n register_type = models.PositiveSmallIntegerField(choices=\n REGISTER_TYPE_CHOICES, verbose_name='servicio')\n is_pay_with_card = models.BooleanField(default=False, verbose_name=\n 'Fue pago con tarjeta de credito')\n product_name = models.CharField(max_length=255, verbose_name=\n 'nombre del producto')\n\n @property\n def is_entrance(self):\n return self.register_type == ENTRANCE_TYPE\n\n @property\n def is_expense(self):\n return self.register_type == EXPENSE_TYPE\n\n def __str__(self):\n return 'Registro número {}'.format(self.id)\n\n\n class Meta:\n ordering = ['date']\n verbose_name = 'Registro'\n verbose_name_plural = 'Registros'\n",
"step-4": "from django.db import models\nfrom .data import REGISTER_TYPE_CHOICES\nfrom .data import ENTRANCE_TYPE\nfrom .data import EXPENSE_TYPE\n\n\nclass EstheticHouse(models.Model):\n name = models.CharField(verbose_name='nombre', max_length=512, unique=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = ['name']\n verbose_name = 'Casa Estética'\n verbose_name_plural = 'Casas Estéticas'\n\n\nclass Client(models.Model):\n name = models.CharField(max_length=255, verbose_name='nombre')\n document = models.CharField(primary_key=True, max_length=30,\n verbose_name='cédula')\n phone_number = models.CharField(max_length=10, verbose_name='celular')\n email = models.CharField(max_length=255, verbose_name=\n 'correo electrónico', null=True)\n birthday = models.DateField(verbose_name='Fecha de Cumpleaños', null=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = ['name']\n verbose_name = 'Cliente'\n verbose_name_plural = 'Clientes'\n\n\nclass Product(models.Model):\n code = models.CharField(primary_key=True, verbose_name='código',\n max_length=255, unique=True)\n house = models.ForeignKey('EstheticHouse', verbose_name='casa estética',\n null=True)\n name = models.CharField(max_length=255, verbose_name='nombre', blank=True)\n price = models.IntegerField(verbose_name='precio')\n amount = models.IntegerField(verbose_name='cantidad')\n\n @property\n def is_sold_out(self):\n return self.amount == 0\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = ['name']\n verbose_name = 'Producto'\n verbose_name_plural = 'Productos'\n\n\nclass Employee(models.Model):\n name = models.CharField(verbose_name='nombre', max_length=255)\n document = models.CharField(primary_key=True, max_length=30,\n verbose_name='cédula')\n phone_number = models.CharField(max_length=10, verbose_name='celular',\n null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n ordering = ['name']\n verbose_name = 'Empleado'\n verbose_name_plural = 'Empleados'\n\n\nclass Register(models.Model):\n owner = models.ForeignKey('Employee', verbose_name='propietario')\n client = models.ForeignKey('Client', verbose_name='cliente', null=True)\n description = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='descripción')\n date = models.DateTimeField(auto_now_add=True, verbose_name='fecha')\n value = models.IntegerField(verbose_name='valor')\n register_type = models.PositiveSmallIntegerField(choices=\n REGISTER_TYPE_CHOICES, verbose_name='servicio')\n is_pay_with_card = models.BooleanField(default=False, verbose_name=\n 'Fue pago con tarjeta de credito')\n product_name = models.CharField(max_length=255, verbose_name=\n 'nombre del producto')\n\n @property\n def is_entrance(self):\n return self.register_type == ENTRANCE_TYPE\n\n @property\n def is_expense(self):\n return self.register_type == EXPENSE_TYPE\n\n def __str__(self):\n return 'Registro número {}'.format(self.id)\n\n\n class Meta:\n ordering = ['date']\n verbose_name = 'Registro'\n verbose_name_plural = 'Registros'\n",
"step-5": "from django.db import models\n\nfrom .data import REGISTER_TYPE_CHOICES\nfrom .data import ENTRANCE_TYPE\nfrom .data import EXPENSE_TYPE\n\n\nclass EstheticHouse(models.Model):\n name = models.CharField(\n verbose_name='nombre',\n max_length=512,\n unique=True,\n )\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = ['name',]\n verbose_name = 'Casa Estética'\n verbose_name_plural = 'Casas Estéticas'\n\n\nclass Client(models.Model):\n name = models.CharField(\n max_length=255,\n verbose_name='nombre',\n )\n\n document = models.CharField(\n primary_key=True,\n max_length=30,\n verbose_name='cédula',\n )\n\n phone_number = models.CharField(\n max_length=10,\n verbose_name='celular'\n )\n\n email = models.CharField(\n max_length=255,\n verbose_name='correo electrónico',\n null=True,\n )\n\n birthday = models.DateField(\n verbose_name='Fecha de Cumpleaños',\n null=True,\n )\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = ['name',]\n verbose_name = 'Cliente'\n verbose_name_plural = 'Clientes'\n\n\nclass Product(models.Model):\n code = models.CharField(\n primary_key=True,\n verbose_name='código',\n max_length=255,\n unique=True,\n )\n\n house = models.ForeignKey(\n 'EstheticHouse',\n verbose_name='casa estética',\n null=True,\n )\n\n name = models.CharField(\n max_length=255,\n verbose_name='nombre',\n blank=True,\n )\n\n price = models.IntegerField(\n verbose_name='precio',\n )\n\n amount = models.IntegerField(\n verbose_name='cantidad',\n )\n\n @property\n def is_sold_out(self):\n return self.amount == 0\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = ['name',]\n verbose_name = 'Producto'\n verbose_name_plural = 'Productos'\n\nclass Employee(models.Model):\n name = models.CharField(\n verbose_name='nombre',\n max_length=255,\n )\n\n document = models.CharField(\n primary_key=True,\n max_length=30,\n verbose_name='cédula',\n )\n\n phone_number = models.CharField(\n max_length=10,\n verbose_name='celular',\n null=True,\n blank=True,\n )\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = ['name',]\n verbose_name = 'Empleado'\n verbose_name_plural = 'Empleados'\n\n\nclass Register(models.Model):\n owner = models.ForeignKey(\n 'Employee',\n verbose_name='propietario'\n )\n\n client = models.ForeignKey(\n 'Client',\n verbose_name='cliente',\n null=True,\n )\n\n description = models.CharField(\n max_length=255,\n null=True,\n blank=True,\n verbose_name='descripción'\n )\n\n date = models.DateTimeField(\n auto_now_add=True,\n verbose_name='fecha',\n )\n\n value = models.IntegerField(\n verbose_name='valor',\n )\n\n register_type = models.PositiveSmallIntegerField(\n choices=REGISTER_TYPE_CHOICES,\n verbose_name='servicio',\n )\n\n is_pay_with_card = models.BooleanField(\n default=False,\n verbose_name='Fue pago con tarjeta de credito'\n )\n\n product_name = models.CharField(\n max_length=255,\n verbose_name='nombre del producto'\n )\n\n @property\n def is_entrance(self):\n return self.register_type == ENTRANCE_TYPE\n @property\n def is_expense(self):\n return self.register_type == EXPENSE_TYPE\n\n def __str__(self):\n return 'Registro número {}'.format(\n self.id\n )\n\n class Meta:\n ordering = ['date',]\n verbose_name = 'Registro'\n verbose_name_plural = 'Registros'\n",
"step-ids": [
7,
8,
18,
19,
20
]
}
|
[
7,
8,
18,
19,
20
] |
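A brief usage sketch for the Register model defined in the record above, added for illustration only. The import path `shop.models` and the concrete value behind ENTRANCE_TYPE are assumptions; only the field names and the is_entrance/is_expense properties come from the source.

# Hypothetical Django shell session (assumes a configured project with migrations applied)
from shop.models import Employee, Register   # assumed app label
from shop.data import ENTRANCE_TYPE          # assumed integer code for an income entry

owner = Employee.objects.create(name='Ana', document='1020304050')
reg = Register.objects.create(owner=owner, value=50000,
                              register_type=ENTRANCE_TYPE,
                              product_name='Crema facial')
assert reg.is_entrance and not reg.is_expense
print(reg)   # -> 'Registro número 1'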
# Runtime: 44 ms, faster than 62.95% of Python3 online submissions for Rotate List.
# Memory Usage: 13.9 MB, less than 6.05% of Python3 online submissions for Rotate List.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if head is None or head.next is None or k == 0:
return head
tmp, length = head, 1
while tmp.next:
tmp = tmp.next
length += 1
k = k % length
if k == 0: # don't need rotate
return head
fast = slow = head # fast and slow point
for _ in range(k):
fast = fast.next
while fast.next:
fast = fast.next
slow = slow.next
res = slow.next # ready result
slow.next = None
fast.next = head
return res
|
normal
|
{
"blob_id": "a79c9799ed237a943ae3d249a4d66eb2f8693e83",
"index": 1896,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def rotateRight(self, head: ListNode, k: int) ->ListNode:\n if head is None or head.next is None or k == 0:\n return head\n tmp, length = head, 1\n while tmp.next:\n tmp = tmp.next\n length += 1\n k = k % length\n if k == 0:\n return head\n fast = slow = head\n for _ in range(k):\n fast = fast.next\n while fast.next:\n fast = fast.next\n slow = slow.next\n res = slow.next\n slow.next = None\n fast.next = head\n return res\n",
"step-4": "# Runtime: 44 ms, faster than 62.95% of Python3 online submissions for Rotate List.\r\n# Memory Usage: 13.9 MB, less than 6.05% of Python3 online submissions for Rotate List.\r\n# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n def rotateRight(self, head: ListNode, k: int) -> ListNode:\r\n if head is None or head.next is None or k == 0:\r\n return head\r\n tmp, length = head, 1\r\n while tmp.next:\r\n tmp = tmp.next\r\n length += 1\r\n k = k % length\r\n if k == 0: # don't need rotate\r\n return head\r\n fast = slow = head # fast and slow point\r\n for _ in range(k):\r\n fast = fast.next\r\n while fast.next:\r\n fast = fast.next\r\n slow = slow.next\r\n res = slow.next # ready result\r\n slow.next = None\r\n fast.next = head\r\n return res\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
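A small self-contained driver for the rotateRight solution above, for illustration: it restates the standard LeetCode ListNode stub, builds the list 1→2→3→4→5, rotates it by 2, and checks the expected 4→5→1→2→3. The Solution class is assumed to be in scope exactly as defined in the record.

class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

def build(vals):
    # turn a Python list into a singly linked list
    head = cur = ListNode(vals[0])
    for v in vals[1:]:
        cur.next = ListNode(v)
        cur = cur.next
    return head

rotated = Solution().rotateRight(build([1, 2, 3, 4, 5]), 2)
out = []
while rotated:
    out.append(rotated.val)
    rotated = rotated.next
assert out == [4, 5, 1, 2, 3]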
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('config.json') as config_file:
initdata = json.load(config_file)
<|reserved_special_token_0|>
pend.updCartesian()
pend.updEnergies()
<|reserved_special_token_0|>
if method == 1:
for n in range(nCycles):
time += timeStep
pend.updEuler(timeStep)
pend.updCartesian()
pend.updEnergies()
pend.updMomentum()
x1 = pend.xy1[0]
x2 = pend.xy2[0]
y1 = pend.xy1[1]
y2 = pend.xy2[1]
p11 = pend.p1[0]
p12 = pend.p1[1]
p21 = pend.p2[0]
p22 = pend.p2[1]
print(p22)
item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),
copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy
(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy
(y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),
copy.deepcopy(p21), copy.deepcopy(p22)]
data.append(item)
elif method == 2:
for n in range(nCycles):
print(n)
time += timeStep
pend.updEulerCromer(timeStep)
pend.updCartesian()
pend.updEnergies()
pend.updMomentum()
x1 = pend.xy1[0]
x2 = pend.xy2[0]
y1 = pend.xy1[1]
y2 = pend.xy2[1]
p11 = pend.p1[0]
p12 = pend.p1[1]
p21 = pend.p2[0]
p22 = pend.p2[1]
item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),
copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy
(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy
(y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),
copy.deepcopy(p21), copy.deepcopy(p22)]
data.append(item)
elif method == 3:
for n in range(nCycles):
print(n)
time += timeStep
pend.updRungeKutta(timeStep)
pend.updCartesian()
pend.updEnergies()
pend.updMomentum()
x1 = pend.xy1[0]
x2 = pend.xy2[0]
y1 = pend.xy1[1]
y2 = pend.xy2[1]
p11 = pend.p1[0]
p12 = pend.p1[1]
p21 = pend.p2[0]
p22 = pend.p2[1]
item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),
copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy
(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy
(y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),
copy.deepcopy(p21), copy.deepcopy(p22)]
data.append(item)
else:
print('invalid method selection, update config file')
exit()
np.save(Path.cwd() / 'datafile', data, allow_pickle=True)
print('data file saved')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('config.json') as config_file:
initdata = json.load(config_file)
initMA = initdata['Mass A']
initMB = initdata['Mass B']
initLA = initdata['Length A']
initLB = initdata['Length B']
initAA = initdata['Angle A']
initAB = initdata['Angle B']
method = initdata['Method']
timeStep = initdata['Time Step']
nCycles = initdata['Number of Cycles']
pend = DP(initMA, initMB, initLA, initLB, math.radians(initAA), math.
radians(initAB), [0, 0], [0, 0], [0, 0], [0, 0], 0, 0, 1, 1, 1, 1, 1, 1, 1)
pend.updCartesian()
pend.updEnergies()
data = []
time = 0
x1 = 0
x2 = 0
y1 = 0
y2 = 0
if method == 1:
for n in range(nCycles):
time += timeStep
pend.updEuler(timeStep)
pend.updCartesian()
pend.updEnergies()
pend.updMomentum()
x1 = pend.xy1[0]
x2 = pend.xy2[0]
y1 = pend.xy1[1]
y2 = pend.xy2[1]
p11 = pend.p1[0]
p12 = pend.p1[1]
p21 = pend.p2[0]
p22 = pend.p2[1]
print(p22)
item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),
copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy
(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy
(y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),
copy.deepcopy(p21), copy.deepcopy(p22)]
data.append(item)
elif method == 2:
for n in range(nCycles):
print(n)
time += timeStep
pend.updEulerCromer(timeStep)
pend.updCartesian()
pend.updEnergies()
pend.updMomentum()
x1 = pend.xy1[0]
x2 = pend.xy2[0]
y1 = pend.xy1[1]
y2 = pend.xy2[1]
p11 = pend.p1[0]
p12 = pend.p1[1]
p21 = pend.p2[0]
p22 = pend.p2[1]
item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),
copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy
(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy
(y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),
copy.deepcopy(p21), copy.deepcopy(p22)]
data.append(item)
elif method == 3:
for n in range(nCycles):
print(n)
time += timeStep
pend.updRungeKutta(timeStep)
pend.updCartesian()
pend.updEnergies()
pend.updMomentum()
x1 = pend.xy1[0]
x2 = pend.xy2[0]
y1 = pend.xy1[1]
y2 = pend.xy2[1]
p11 = pend.p1[0]
p12 = pend.p1[1]
p21 = pend.p2[0]
p22 = pend.p2[1]
item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),
copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy
(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy
(y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),
copy.deepcopy(p21), copy.deepcopy(p22)]
data.append(item)
else:
print('invalid method selection, update config file')
exit()
np.save(Path.cwd() / 'datafile', data, allow_pickle=True)
print('data file saved')
<|reserved_special_token_1|>
import numpy as np
import matplotlib as plt
import math
from DoublePendulum import DP
import json
import pandas as pd
import copy
from pathlib import Path
with open('config.json') as config_file:
initdata = json.load(config_file)
initMA = initdata['Mass A']
initMB = initdata['Mass B']
initLA = initdata['Length A']
initLB = initdata['Length B']
initAA = initdata['Angle A']
initAB = initdata['Angle B']
method = initdata['Method']
timeStep = initdata['Time Step']
nCycles = initdata['Number of Cycles']
pend = DP(initMA, initMB, initLA, initLB, math.radians(initAA), math.
radians(initAB), [0, 0], [0, 0], [0, 0], [0, 0], 0, 0, 1, 1, 1, 1, 1, 1, 1)
pend.updCartesian()
pend.updEnergies()
data = []
time = 0
x1 = 0
x2 = 0
y1 = 0
y2 = 0
if method == 1:
for n in range(nCycles):
time += timeStep
pend.updEuler(timeStep)
pend.updCartesian()
pend.updEnergies()
pend.updMomentum()
x1 = pend.xy1[0]
x2 = pend.xy2[0]
y1 = pend.xy1[1]
y2 = pend.xy2[1]
p11 = pend.p1[0]
p12 = pend.p1[1]
p21 = pend.p2[0]
p22 = pend.p2[1]
print(p22)
item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),
copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy
(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy
(y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),
copy.deepcopy(p21), copy.deepcopy(p22)]
data.append(item)
elif method == 2:
for n in range(nCycles):
print(n)
time += timeStep
pend.updEulerCromer(timeStep)
pend.updCartesian()
pend.updEnergies()
pend.updMomentum()
x1 = pend.xy1[0]
x2 = pend.xy2[0]
y1 = pend.xy1[1]
y2 = pend.xy2[1]
p11 = pend.p1[0]
p12 = pend.p1[1]
p21 = pend.p2[0]
p22 = pend.p2[1]
item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),
copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy
(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy
(y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),
copy.deepcopy(p21), copy.deepcopy(p22)]
data.append(item)
elif method == 3:
for n in range(nCycles):
print(n)
time += timeStep
pend.updRungeKutta(timeStep)
pend.updCartesian()
pend.updEnergies()
pend.updMomentum()
x1 = pend.xy1[0]
x2 = pend.xy2[0]
y1 = pend.xy1[1]
y2 = pend.xy2[1]
p11 = pend.p1[0]
p12 = pend.p1[1]
p21 = pend.p2[0]
p22 = pend.p2[1]
item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),
copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy
(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy
(y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),
copy.deepcopy(p21), copy.deepcopy(p22)]
data.append(item)
else:
print('invalid method selection, update config file')
exit()
np.save(Path.cwd() / 'datafile', data, allow_pickle=True)
print('data file saved')
<|reserved_special_token_1|>
import numpy as np
import matplotlib as plt
import math
from DoublePendulum import DP #imports useful modules and double pendulum class from DoublePendulum.py
import json
import pandas as pd
import copy
from pathlib import Path
#accessing config file
with open('config.json') as config_file:
initdata = json.load(config_file)
#retrieving variables from config file
initMA = initdata['Mass A']
initMB = initdata['Mass B']
initLA = initdata['Length A']
initLB = initdata['Length B']
initAA = initdata['Angle A']
initAB = initdata['Angle B']
method = initdata['Method']
timeStep = initdata['Time Step']
nCycles = initdata['Number of Cycles']
# Setting Initial Conditions based on the config file
pend = DP(initMA,initMB,initLA,initLB,math.radians(initAA),math.radians(initAB),[0,0],[0,0],[0,0],[0,0],0,0,1,1,1,1,1,1,1)
pend.updCartesian()
pend.updEnergies()
data = []
time = 0
x1 = 0
x2 = 0
y1 = 0
y2 = 0
if method == 1:
for n in range(nCycles):
#print(n)
time += timeStep
pend.updEuler(timeStep)
pend.updCartesian()
pend.updEnergies()
pend.updMomentum()
x1 = pend.xy1[0]
x2 = pend.xy2[0]
y1 = pend.xy1[1]
y2 = pend.xy2[1]
p11 = pend.p1[0]
p12 = pend.p1[1]
p21 = pend.p2[0]
p22 = pend.p2[1]
print(p22)
item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1), copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy(y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12), copy.deepcopy(p21), copy.deepcopy(p22)]
data.append(item)
elif method == 2:
for n in range(nCycles):
print(n)
time += timeStep
pend.updEulerCromer(timeStep)
pend.updCartesian()
pend.updEnergies()
pend.updMomentum()
x1 = pend.xy1[0]
x2 = pend.xy2[0]
y1 = pend.xy1[1]
y2 = pend.xy2[1]
p11 = pend.p1[0]
p12 = pend.p1[1]
p21 = pend.p2[0]
p22 = pend.p2[1]
item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1), copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy(y1), copy.deepcopy(y2),copy.deepcopy(p11),copy.deepcopy(p12),copy.deepcopy(p21),copy.deepcopy(p22)]
data.append(item)
elif method == 3:
for n in range(nCycles):
print(n)
time += timeStep
pend.updRungeKutta(timeStep)
pend.updCartesian()
pend.updEnergies()
pend.updMomentum()
x1 = pend.xy1[0]
x2 = pend.xy2[0]
y1 = pend.xy1[1]
y2 = pend.xy2[1]
p11 = pend.p1[0]
p12 = pend.p1[1]
p21 = pend.p2[0]
p22 = pend.p2[1]
item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1), copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy(y1), copy.deepcopy(y2),copy.deepcopy(p11),copy.deepcopy(p12),copy.deepcopy(p21),copy.deepcopy(p22)]
data.append(item)
else:
print('invalid method selection, update config file')
exit()
np.save(Path.cwd()/'datafile', data, allow_pickle=True)
print('data file saved')
|
flexible
|
{
"blob_id": "c2b6e51622681ac916e860ed4ff5715808dff102",
"index": 9725,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('config.json') as config_file:\n initdata = json.load(config_file)\n<mask token>\npend.updCartesian()\npend.updEnergies()\n<mask token>\nif method == 1:\n for n in range(nCycles):\n time += timeStep\n pend.updEuler(timeStep)\n pend.updCartesian()\n pend.updEnergies()\n pend.updMomentum()\n x1 = pend.xy1[0]\n x2 = pend.xy2[0]\n y1 = pend.xy1[1]\n y2 = pend.xy2[1]\n p11 = pend.p1[0]\n p12 = pend.p1[1]\n p21 = pend.p2[0]\n p22 = pend.p2[1]\n print(p22)\n item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),\n copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy\n (pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy\n (y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),\n copy.deepcopy(p21), copy.deepcopy(p22)]\n data.append(item)\nelif method == 2:\n for n in range(nCycles):\n print(n)\n time += timeStep\n pend.updEulerCromer(timeStep)\n pend.updCartesian()\n pend.updEnergies()\n pend.updMomentum()\n x1 = pend.xy1[0]\n x2 = pend.xy2[0]\n y1 = pend.xy1[1]\n y2 = pend.xy2[1]\n p11 = pend.p1[0]\n p12 = pend.p1[1]\n p21 = pend.p2[0]\n p22 = pend.p2[1]\n item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),\n copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy\n (pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy\n (y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),\n copy.deepcopy(p21), copy.deepcopy(p22)]\n data.append(item)\nelif method == 3:\n for n in range(nCycles):\n print(n)\n time += timeStep\n pend.updRungeKutta(timeStep)\n pend.updCartesian()\n pend.updEnergies()\n pend.updMomentum()\n x1 = pend.xy1[0]\n x2 = pend.xy2[0]\n y1 = pend.xy1[1]\n y2 = pend.xy2[1]\n p11 = pend.p1[0]\n p12 = pend.p1[1]\n p21 = pend.p2[0]\n p22 = pend.p2[1]\n item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),\n copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy\n (pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy\n (y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),\n copy.deepcopy(p21), copy.deepcopy(p22)]\n data.append(item)\nelse:\n print('invalid method selection, update config file')\n exit()\nnp.save(Path.cwd() / 'datafile', data, allow_pickle=True)\nprint('data file saved')\n",
"step-3": "<mask token>\nwith open('config.json') as config_file:\n initdata = json.load(config_file)\ninitMA = initdata['Mass A']\ninitMB = initdata['Mass B']\ninitLA = initdata['Length A']\ninitLB = initdata['Length B']\ninitAA = initdata['Angle A']\ninitAB = initdata['Angle B']\nmethod = initdata['Method']\ntimeStep = initdata['Time Step']\nnCycles = initdata['Number of Cycles']\npend = DP(initMA, initMB, initLA, initLB, math.radians(initAA), math.\n radians(initAB), [0, 0], [0, 0], [0, 0], [0, 0], 0, 0, 1, 1, 1, 1, 1, 1, 1)\npend.updCartesian()\npend.updEnergies()\ndata = []\ntime = 0\nx1 = 0\nx2 = 0\ny1 = 0\ny2 = 0\nif method == 1:\n for n in range(nCycles):\n time += timeStep\n pend.updEuler(timeStep)\n pend.updCartesian()\n pend.updEnergies()\n pend.updMomentum()\n x1 = pend.xy1[0]\n x2 = pend.xy2[0]\n y1 = pend.xy1[1]\n y2 = pend.xy2[1]\n p11 = pend.p1[0]\n p12 = pend.p1[1]\n p21 = pend.p2[0]\n p22 = pend.p2[1]\n print(p22)\n item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),\n copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy\n (pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy\n (y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),\n copy.deepcopy(p21), copy.deepcopy(p22)]\n data.append(item)\nelif method == 2:\n for n in range(nCycles):\n print(n)\n time += timeStep\n pend.updEulerCromer(timeStep)\n pend.updCartesian()\n pend.updEnergies()\n pend.updMomentum()\n x1 = pend.xy1[0]\n x2 = pend.xy2[0]\n y1 = pend.xy1[1]\n y2 = pend.xy2[1]\n p11 = pend.p1[0]\n p12 = pend.p1[1]\n p21 = pend.p2[0]\n p22 = pend.p2[1]\n item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),\n copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy\n (pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy\n (y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),\n copy.deepcopy(p21), copy.deepcopy(p22)]\n data.append(item)\nelif method == 3:\n for n in range(nCycles):\n print(n)\n time += timeStep\n pend.updRungeKutta(timeStep)\n pend.updCartesian()\n pend.updEnergies()\n pend.updMomentum()\n x1 = pend.xy1[0]\n x2 = pend.xy2[0]\n y1 = pend.xy1[1]\n y2 = pend.xy2[1]\n p11 = pend.p1[0]\n p12 = pend.p1[1]\n p21 = pend.p2[0]\n p22 = pend.p2[1]\n item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),\n copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy\n (pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy\n (y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),\n copy.deepcopy(p21), copy.deepcopy(p22)]\n data.append(item)\nelse:\n print('invalid method selection, update config file')\n exit()\nnp.save(Path.cwd() / 'datafile', data, allow_pickle=True)\nprint('data file saved')\n",
"step-4": "import numpy as np\nimport matplotlib as plt\nimport math\nfrom DoublePendulum import DP\nimport json\nimport pandas as pd\nimport copy\nfrom pathlib import Path\nwith open('config.json') as config_file:\n initdata = json.load(config_file)\ninitMA = initdata['Mass A']\ninitMB = initdata['Mass B']\ninitLA = initdata['Length A']\ninitLB = initdata['Length B']\ninitAA = initdata['Angle A']\ninitAB = initdata['Angle B']\nmethod = initdata['Method']\ntimeStep = initdata['Time Step']\nnCycles = initdata['Number of Cycles']\npend = DP(initMA, initMB, initLA, initLB, math.radians(initAA), math.\n radians(initAB), [0, 0], [0, 0], [0, 0], [0, 0], 0, 0, 1, 1, 1, 1, 1, 1, 1)\npend.updCartesian()\npend.updEnergies()\ndata = []\ntime = 0\nx1 = 0\nx2 = 0\ny1 = 0\ny2 = 0\nif method == 1:\n for n in range(nCycles):\n time += timeStep\n pend.updEuler(timeStep)\n pend.updCartesian()\n pend.updEnergies()\n pend.updMomentum()\n x1 = pend.xy1[0]\n x2 = pend.xy2[0]\n y1 = pend.xy1[1]\n y2 = pend.xy2[1]\n p11 = pend.p1[0]\n p12 = pend.p1[1]\n p21 = pend.p2[0]\n p22 = pend.p2[1]\n print(p22)\n item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),\n copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy\n (pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy\n (y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),\n copy.deepcopy(p21), copy.deepcopy(p22)]\n data.append(item)\nelif method == 2:\n for n in range(nCycles):\n print(n)\n time += timeStep\n pend.updEulerCromer(timeStep)\n pend.updCartesian()\n pend.updEnergies()\n pend.updMomentum()\n x1 = pend.xy1[0]\n x2 = pend.xy2[0]\n y1 = pend.xy1[1]\n y2 = pend.xy2[1]\n p11 = pend.p1[0]\n p12 = pend.p1[1]\n p21 = pend.p2[0]\n p22 = pend.p2[1]\n item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),\n copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy\n (pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy\n (y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),\n copy.deepcopy(p21), copy.deepcopy(p22)]\n data.append(item)\nelif method == 3:\n for n in range(nCycles):\n print(n)\n time += timeStep\n pend.updRungeKutta(timeStep)\n pend.updCartesian()\n pend.updEnergies()\n pend.updMomentum()\n x1 = pend.xy1[0]\n x2 = pend.xy2[0]\n y1 = pend.xy1[1]\n y2 = pend.xy2[1]\n p11 = pend.p1[0]\n p12 = pend.p1[1]\n p21 = pend.p2[0]\n p22 = pend.p2[1]\n item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1),\n copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy\n (pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy\n (y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12),\n copy.deepcopy(p21), copy.deepcopy(p22)]\n data.append(item)\nelse:\n print('invalid method selection, update config file')\n exit()\nnp.save(Path.cwd() / 'datafile', data, allow_pickle=True)\nprint('data file saved')\n",
"step-5": "import numpy as np \r\nimport matplotlib as plt\r\nimport math\r\nfrom DoublePendulum import DP #imports useful modules and double pendulum class from DoublePendulum.py\r\nimport json\r\nimport pandas as pd\r\nimport copy\r\nfrom pathlib import Path\r\n\r\n#accessing config file\r\nwith open('config.json') as config_file:\r\n initdata = json.load(config_file)\r\n\r\n#retrieving variables from config file\r\ninitMA = initdata['Mass A']\r\ninitMB = initdata['Mass B']\r\ninitLA = initdata['Length A']\r\ninitLB = initdata['Length B']\r\ninitAA = initdata['Angle A']\r\ninitAB = initdata['Angle B']\r\nmethod = initdata['Method']\r\ntimeStep = initdata['Time Step']\r\nnCycles = initdata['Number of Cycles']\r\n\r\n# Setting Initial Conditions based on the config file\r\npend = DP(initMA,initMB,initLA,initLB,math.radians(initAA),math.radians(initAB),[0,0],[0,0],[0,0],[0,0],0,0,1,1,1,1,1,1,1)\r\npend.updCartesian()\r\npend.updEnergies()\r\ndata = []\r\ntime = 0\r\nx1 = 0\r\nx2 = 0\r\ny1 = 0\r\ny2 = 0\r\n\r\nif method == 1:\r\n for n in range(nCycles):\r\n #print(n)\r\n time += timeStep\r\n pend.updEuler(timeStep)\r\n pend.updCartesian()\r\n pend.updEnergies()\r\n pend.updMomentum()\r\n x1 = pend.xy1[0]\r\n x2 = pend.xy2[0]\r\n y1 = pend.xy1[1]\r\n y2 = pend.xy2[1]\r\n p11 = pend.p1[0]\r\n p12 = pend.p1[1]\r\n p21 = pend.p2[0]\r\n p22 = pend.p2[1]\r\n print(p22)\r\n item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1), copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy(y1), copy.deepcopy(y2), copy.deepcopy(p11), copy.deepcopy(p12), copy.deepcopy(p21), copy.deepcopy(p22)]\r\n data.append(item)\r\nelif method == 2:\r\n for n in range(nCycles):\r\n print(n)\r\n time += timeStep\r\n pend.updEulerCromer(timeStep)\r\n pend.updCartesian()\r\n pend.updEnergies()\r\n pend.updMomentum()\r\n x1 = pend.xy1[0]\r\n x2 = pend.xy2[0]\r\n y1 = pend.xy1[1]\r\n y2 = pend.xy2[1]\r\n p11 = pend.p1[0]\r\n p12 = pend.p1[1]\r\n p21 = pend.p2[0]\r\n p22 = pend.p2[1]\r\n item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1), copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy(y1), copy.deepcopy(y2),copy.deepcopy(p11),copy.deepcopy(p12),copy.deepcopy(p21),copy.deepcopy(p22)]\r\n data.append(item)\r\nelif method == 3:\r\n for n in range(nCycles):\r\n print(n)\r\n time += timeStep\r\n pend.updRungeKutta(timeStep)\r\n pend.updCartesian()\r\n pend.updEnergies()\r\n pend.updMomentum()\r\n x1 = pend.xy1[0]\r\n x2 = pend.xy2[0]\r\n y1 = pend.xy1[1]\r\n y2 = pend.xy2[1]\r\n p11 = pend.p1[0]\r\n p12 = pend.p1[1]\r\n p21 = pend.p2[0]\r\n p22 = pend.p2[1]\r\n item = [time, copy.deepcopy(pend.totalE), copy.deepcopy(pend.KE1), copy.deepcopy(pend.KE2), copy.deepcopy(pend.PE1), copy.deepcopy(pend.PE2), copy.deepcopy(x1), copy.deepcopy(x2), copy.deepcopy(y1), copy.deepcopy(y2),copy.deepcopy(p11),copy.deepcopy(p12),copy.deepcopy(p21),copy.deepcopy(p22)]\r\n data.append(item)\r\nelse:\r\n print('invalid method selection, update config file')\r\n exit()\r\n\r\nnp.save(Path.cwd()/'datafile', data, allow_pickle=True)\r\nprint('data file saved')\r\n\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
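The simulation script above reads every parameter from config.json; this sketch writes a minimal compatible file. The key names are taken directly from the script's dictionary lookups, while the numeric values are illustrative placeholders.

import json

config = {
    'Mass A': 1.0, 'Mass B': 1.0,
    'Length A': 1.0, 'Length B': 1.0,
    'Angle A': 90.0, 'Angle B': 90.0,  # degrees; the script converts via math.radians
    'Method': 3,                       # 1 = Euler, 2 = Euler-Cromer, 3 = Runge-Kutta (per the if/elif chain)
    'Time Step': 0.01,
    'Number of Cycles': 10000,
}
with open('config.json', 'w') as f:
    json.dump(config, f, indent=4)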
<|reserved_special_token_0|>
def preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=
2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,
artifact_removal=False, normalize=False):
if notch:
data = notch_filter(data, ac_freq, sample_rate)
if hp_filter:
data = highpass_filter(data, hp_freq)
if bp_filter:
data = bandpass_filter(data, bp_low, bp_high, sample_rate)
if normalize:
data = normalize_data(data, 'mean_std')
if artifact_removal:
data = remove_artifacts(data)
return data
def notch_filter(data, ac_freq, sample_rate):
w0 = ac_freq / (sample_rate / 2)
return signal.notch(data, w0)
<|reserved_special_token_0|>
def bandpass_filter(data, bp_low, bp_high, sample_rate):
return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=
sample_rate)
<|reserved_special_token_0|>
def remove_artifacts(data):
cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]
return np.squeeze(cleaned)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=
2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,
artifact_removal=False, normalize=False):
if notch:
data = notch_filter(data, ac_freq, sample_rate)
if hp_filter:
data = highpass_filter(data, hp_freq)
if bp_filter:
data = bandpass_filter(data, bp_low, bp_high, sample_rate)
if normalize:
data = normalize_data(data, 'mean_std')
if artifact_removal:
data = remove_artifacts(data)
return data
def notch_filter(data, ac_freq, sample_rate):
w0 = ac_freq / (sample_rate / 2)
return signal.notch(data, w0)
def highpass_filter(data, hp_freq):
return signal.butter_highpass(data, hp_freq)
def bandpass_filter(data, bp_low, bp_high, sample_rate):
return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=
sample_rate)
def normalize_data(data, strategy):
return signal.normalize(data, strategy)
def remove_artifacts(data):
cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]
return np.squeeze(cleaned)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('../shared')
<|reserved_special_token_0|>
def preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=
2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,
artifact_removal=False, normalize=False):
if notch:
data = notch_filter(data, ac_freq, sample_rate)
if hp_filter:
data = highpass_filter(data, hp_freq)
if bp_filter:
data = bandpass_filter(data, bp_low, bp_high, sample_rate)
if normalize:
data = normalize_data(data, 'mean_std')
if artifact_removal:
data = remove_artifacts(data)
return data
def notch_filter(data, ac_freq, sample_rate):
w0 = ac_freq / (sample_rate / 2)
return signal.notch(data, w0)
def highpass_filter(data, hp_freq):
return signal.butter_highpass(data, hp_freq)
def bandpass_filter(data, bp_low, bp_high, sample_rate):
return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=
sample_rate)
def normalize_data(data, strategy):
return signal.normalize(data, strategy)
def remove_artifacts(data):
cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]
return np.squeeze(cleaned)
<|reserved_special_token_1|>
import sys
sys.path.append('../shared')
from gumpy import signal
import numpy as np
def preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=
2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,
artifact_removal=False, normalize=False):
if notch:
data = notch_filter(data, ac_freq, sample_rate)
if hp_filter:
data = highpass_filter(data, hp_freq)
if bp_filter:
data = bandpass_filter(data, bp_low, bp_high, sample_rate)
if normalize:
data = normalize_data(data, 'mean_std')
if artifact_removal:
data = remove_artifacts(data)
return data
def notch_filter(data, ac_freq, sample_rate):
w0 = ac_freq / (sample_rate / 2)
return signal.notch(data, w0)
def highpass_filter(data, hp_freq):
return signal.butter_highpass(data, hp_freq)
def bandpass_filter(data, bp_low, bp_high, sample_rate):
return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=
sample_rate)
def normalize_data(data, strategy):
return signal.normalize(data, strategy)
def remove_artifacts(data):
cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]
return np.squeeze(cleaned)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# Add gumpy path
sys.path.append('../shared')
from gumpy import signal
import numpy as np
def preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=2, bp_high=60, notch=False,
hp_filter=False, bp_filter=False, artifact_removal=False, normalize=False):
if notch:
data = notch_filter(data, ac_freq, sample_rate)
if hp_filter:
data = highpass_filter(data, hp_freq)
if bp_filter:
data = bandpass_filter(data, bp_low, bp_high, sample_rate)
if normalize:
data = normalize_data(data, 'mean_std')
if artifact_removal:
data = remove_artifacts(data)
return data
def notch_filter(data, ac_freq, sample_rate):
w0 = ac_freq / (sample_rate / 2)
return signal.notch(data, w0)
def highpass_filter(data, hp_freq):
return signal.butter_highpass(data, hp_freq)
def bandpass_filter(data, bp_low, bp_high, sample_rate):
return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=sample_rate)
def normalize_data(data, strategy):
return signal.normalize(data, strategy)
def remove_artifacts(data):
cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]
return np.squeeze(cleaned)
|
flexible
|
{
"blob_id": "5f1cbe1019f218d2aad616ea8bbe760ea760534c",
"index": 9359,
"step-1": "<mask token>\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=\n 2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,\n artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\n<mask token>\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=\n sample_rate)\n\n\n<mask token>\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-2": "<mask token>\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=\n 2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,\n artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\ndef highpass_filter(data, hp_freq):\n return signal.butter_highpass(data, hp_freq)\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=\n sample_rate)\n\n\ndef normalize_data(data, strategy):\n return signal.normalize(data, strategy)\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-3": "<mask token>\nsys.path.append('../shared')\n<mask token>\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=\n 2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,\n artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\ndef highpass_filter(data, hp_freq):\n return signal.butter_highpass(data, hp_freq)\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=\n sample_rate)\n\n\ndef normalize_data(data, strategy):\n return signal.normalize(data, strategy)\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-4": "import sys\nsys.path.append('../shared')\nfrom gumpy import signal\nimport numpy as np\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=\n 2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,\n artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\ndef highpass_filter(data, hp_freq):\n return signal.butter_highpass(data, hp_freq)\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=\n sample_rate)\n\n\ndef normalize_data(data, strategy):\n return signal.normalize(data, strategy)\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\n\n# Add gumpy path\nsys.path.append('../shared')\nfrom gumpy import signal\nimport numpy as np\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=2, bp_high=60, notch=False,\n hp_filter=False, bp_filter=False, artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\ndef highpass_filter(data, hp_freq):\n return signal.butter_highpass(data, hp_freq)\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=sample_rate)\n\n\ndef normalize_data(data, strategy):\n return signal.normalize(data, strategy)\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
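A hedged usage sketch for the preprocessing module above: it runs the notch and bandpass stages over one second of synthetic 160 Hz data. It assumes the file is importable as `preprocess` and that the gumpy signal functions accept and return 1-D numpy arrays, which is what the module's own calls imply.

import numpy as np
from preprocess import preprocess_data   # assumed module name for the file above

fs = 160
t = np.arange(fs) / float(fs)
raw = np.sin(2 * np.pi * 10 * t) + 0.3 * np.sin(2 * np.pi * 60 * t)  # 10 Hz signal plus 60 Hz mains hum
clean = preprocess_data(raw, sample_rate=fs, ac_freq=60,
                        notch=True, bp_filter=True, bp_low=2, bp_high=60)
print(clean.shape)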
import sys
import os
import csv
import urllib2, socket, time
import gzip, StringIO
import re, random, types
from bs4 import BeautifulSoup
from datetime import datetime
import json
from HTMLParser import HTMLParser
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def extractData(url,title):
data=""
req=urllib2.Request(url)
response=urllib2.urlopen(req)
html_data=response.read()
soup=BeautifulSoup(html_data)
[s.extract() for s in soup('script')]
    d=re.compile(r'.*%s.*' % re.escape(title))  # escape so regex metacharacters in the title match literally
last_elem=0
for elem in soup(text=d):
last_elem=elem
if last_elem!=0:
p1=last_elem.parent
try1=1
        while len(data)<1000: 
            parent=p1.parent
            if parent is None:  # reached the top of the document; stop widening the search
                break
            p1=parent
data=""
for each_child in parent.findChildren():
data+=each_child.get_text().strip().replace('\n','')
print try1
try1+=1
else:
data=""
for each_child in soup.body.findChildren():
data+=each_child.get_text().strip().replace('\n','')
return data
def readData(input_file):
data=json.loads(input_file.read())
for each_r in data:
if each_r['ID']>=1:
s = MLStripper()
s.feed(each_r['title'])
title =s.get_data()
val=len(title)/2
val=val/2
print title[:-val]
article_data=extractData(each_r['url'],title)
print 'url',each_r['url']
print article_data
print '##############################################'
raw_input()
if __name__=="__main__":
    if len(sys.argv)>=2:  # expects the data file path as the first CLI argument
input_file=open(sys.argv[1],"r")
readData(input_file)
else:
print "Usage: python extractnew.py <data_file_location>"
|
normal
|
{
"blob_id": "2d444c00e4dbdcb143d19752cd1a751169de73d3",
"index": 5746,
"step-1": "import sys\nimport os\nimport csv\nimport urllib2, socket, time\nimport gzip, StringIO\nimport re, random, types\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport json\nfrom HTMLParser import HTMLParser\n\nclass MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\ndef extractData(url,title):\n data=\"\"\n req=urllib2.Request(url)\n response=urllib2.urlopen(req)\n html_data=response.read() \n soup=BeautifulSoup(html_data)\n [s.extract() for s in soup('script')]\n d=re.compile(r'.*%s.*' % title)\n last_elem=0\n for elem in soup(text=d):\n last_elem=elem\n if last_elem!=0: \n p1=last_elem.parent \n try1=1 \n while len(data)<1000: \n parent=p1.parent\n p1=parent\n data=\"\" \n for each_child in parent.findChildren():\n data+=each_child.get_text().strip().replace('\\n','') \n print try1\n try1+=1 \n else:\n data=\"\" \n for each_child in soup.body.findChildren():\n data+=each_child.get_text().strip().replace('\\n','') \n return data\n\n\ndef readData(input_file):\n data=json.loads(input_file.read())\n for each_r in data:\n if each_r['ID']>=1:\n s = MLStripper()\n s.feed(each_r['title'])\n title =s.get_data() \n val=len(title)/2\n val=val/2\n print title[:-val]\n article_data=extractData(each_r['url'],title)\n print 'url',each_r['url'] \n print article_data\n print '##############################################'\n raw_input() \nif __name__==\"__main__\":\n if sys.argv>=2:\n input_file=open(sys.argv[1],\"r\")\n readData(input_file)\n else:\n print \"Usage: python extractnew.py <data_file_location>\" \n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class SolveItCommand(sublime_plugin.TextCommand):
<|reserved_special_token_0|>
def run(self, _):
window = self.view.window()
window.show_input_panel('Enter ContestID & ProblemID : ', '', self.
on_done, self.on_change, self.on_cancel)
def on_done(self, input_data):
process(input_data)
<|reserved_special_token_0|>
def on_cancel(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SolveItCommand(sublime_plugin.TextCommand):
"""
Submit solution from sublime by getting contest ID and problem ID
from the user
"""
def run(self, _):
window = self.view.window()
window.show_input_panel('Enter ContestID & ProblemID : ', '', self.
on_done, self.on_change, self.on_cancel)
def on_done(self, input_data):
process(input_data)
def on_change(self, input_data):
pass
def on_cancel(self):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def process(string):
filename = sublime.active_window().active_view().file_name()
contestid, problem = string.strip().split()
executor_url = '127.0.0.1:9222'
url = 'codeforces.com/contest/' + contestid + '/problem/' + problem
_chrome_options = Options()
_chrome_options.add_argument('disable-infobars')
_chrome_options.add_argument('--start-maximized')
_chrome_options.add_experimental_option('debuggerAddress', executor_url)
try:
driver = webdriver.Chrome(options=_chrome_options)
driver.implicitly_wait(30)
try:
driver.get('http://' + url.rstrip())
driver.find_element_by_name('sourceFile')
driver.find_element_by_css_selector('input[type="file"]').clear()
driver.find_element_by_css_selector('input[type="file"]'
).send_keys(filename.rstrip())
driver.find_element_by_class_name('submit').click()
except Exception:
sublime.error_message(
'Either Codeforces is too busy or File is Untitled.'
)
except Exception:
sublime.error_message('Server is not active.')
class SolveItCommand(sublime_plugin.TextCommand):
"""
Submit solution from sublime by getting contest ID and problem ID
from the user
"""
def run(self, _):
window = self.view.window()
window.show_input_panel('Enter ContestID & ProblemID : ', '', self.
on_done, self.on_change, self.on_cancel)
def on_done(self, input_data):
process(input_data)
def on_change(self, input_data):
pass
def on_cancel(self):
pass
<|reserved_special_token_1|>
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sublime
import sublime_plugin
<|reserved_special_token_0|>
def process(string):
filename = sublime.active_window().active_view().file_name()
contestid, problem = string.strip().split()
executor_url = '127.0.0.1:9222'
url = 'codeforces.com/contest/' + contestid + '/problem/' + problem
_chrome_options = Options()
_chrome_options.add_argument('disable-infobars')
_chrome_options.add_argument('--start-maximized')
_chrome_options.add_experimental_option('debuggerAddress', executor_url)
try:
driver = webdriver.Chrome(options=_chrome_options)
driver.implicitly_wait(30)
try:
driver.get('http://' + url.rstrip())
driver.find_element_by_name('sourceFile')
driver.find_element_by_css_selector('input[type="file"]').clear()
driver.find_element_by_css_selector('input[type="file"]'
).send_keys(filename.rstrip())
driver.find_element_by_class_name('submit').click()
except Exception:
sublime.error_message(
'Either Codeforces is too busy or File is Untitled.'
)
except Exception:
sublime.error_message('Server is not active.')
class SolveItCommand(sublime_plugin.TextCommand):
"""
Submit solution from sublime by getting contest ID and problem ID
from the user
"""
def run(self, _):
window = self.view.window()
window.show_input_panel('Enter ContestID & ProblemID : ', '', self.
on_done, self.on_change, self.on_cancel)
def on_done(self, input_data):
process(input_data)
def on_change(self, input_data):
pass
def on_cancel(self):
pass
<|reserved_special_token_1|>
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sublime
import sublime_plugin
"""
Copy and paste the selenium and urllib3 Python modules into the
"sublime-text-3/Lib/Python3.3" folder of Sublime Text 3
"""
def process(string):
# Get active file name
filename = sublime.active_window().active_view().file_name()
contestid, problem = string.strip().split()
# Change executor_url according to your preference
executor_url = "127.0.0.1:9222" # change 9222 to the port you have used.
url = "codeforces.com/contest/" + contestid + "/problem/" + problem
_chrome_options = Options()
_chrome_options.add_argument('disable-infobars')
_chrome_options.add_argument("--start-maximized")
_chrome_options.add_experimental_option("debuggerAddress", executor_url)
try:
driver = webdriver.Chrome(options=_chrome_options)
driver.implicitly_wait(30)
try:
driver.get("http://" + url.rstrip())
driver.find_element_by_name("sourceFile")
driver.find_element_by_css_selector('input[type="file"]').clear()
# Send File to Codeforces
driver.find_element_by_css_selector(
'input[type="file"]').send_keys(filename.rstrip())
# Click on submit button
driver.find_element_by_class_name("submit").click()
except Exception:
# In case Codeforces is too busy or File is untitled.
sublime.error_message('Either Codeforces is too busy or \
File is Untitled.')
except Exception:
# In case Server is not active.
sublime.error_message('Server is not active.')
class SolveItCommand(sublime_plugin.TextCommand):
"""
Submit solution from sublime by getting contest ID and problem ID
from the user
"""
def run(self, _):
window = self.view.window()
# Input Panel to get Contest ID and Problem ID from the user
window.show_input_panel(
"Enter ContestID & ProblemID : ",
"",
self.on_done,
self.on_change,
self.on_cancel)
def on_done(self, input_data):
process(input_data)
def on_change(self, input_data):
pass
def on_cancel(self):
pass
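
# Usage note (assumed setup; paths are illustrative): the plugin attaches to
# an already-running Chrome over the DevTools protocol, so start the browser
# beforehand with a matching port, e.g.
#
#   google-chrome --remote-debugging-port=9222 --user-data-dir=/tmp/cf-profile
#
# log in to Codeforces once in that window, and later submissions reuse the
# same session through the "debuggerAddress" option above.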
|
flexible
|
{
"blob_id": "9767014992981001bd2e8dece67525650c05a2a8",
"index": 4018,
"step-1": "<mask token>\n\n\nclass SolveItCommand(sublime_plugin.TextCommand):\n <mask token>\n\n def run(self, _):\n window = self.view.window()\n window.show_input_panel('Enter ContestID & ProblemID : ', '', self.\n on_done, self.on_change, self.on_cancel)\n\n def on_done(self, input_data):\n process(input_data)\n <mask token>\n\n def on_cancel(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass SolveItCommand(sublime_plugin.TextCommand):\n \"\"\"\n Submit solution from sublime by getting contest ID and problem ID\n from the user\n \"\"\"\n\n def run(self, _):\n window = self.view.window()\n window.show_input_panel('Enter ContestID & ProblemID : ', '', self.\n on_done, self.on_change, self.on_cancel)\n\n def on_done(self, input_data):\n process(input_data)\n\n def on_change(self, input_data):\n pass\n\n def on_cancel(self):\n pass\n",
"step-3": "<mask token>\n\n\ndef process(string):\n filename = sublime.active_window().active_view().file_name()\n contestid, problem = string.strip().split()\n executor_url = '127.0.0.1:9222'\n url = 'codeforces.com/contest/' + contestid + '/problem/' + problem\n _chrome_options = Options()\n _chrome_options.add_argument('disable-infobars')\n _chrome_options.add_argument('--start-maximized')\n _chrome_options.add_experimental_option('debuggerAddress', executor_url)\n try:\n driver = webdriver.Chrome(options=_chrome_options)\n driver.implicitly_wait(30)\n try:\n driver.get('http://' + url.rstrip())\n driver.find_element_by_name('sourceFile')\n driver.find_element_by_css_selector('input[type=\"file\"]').clear()\n driver.find_element_by_css_selector('input[type=\"file\"]'\n ).send_keys(filename.rstrip())\n driver.find_element_by_class_name('submit').click()\n except Exception:\n sublime.error_message(\n 'Either Codeforces is too busy or File is Untitled.'\n )\n except Exception:\n sublime.error_message('Server is not active.')\n\n\nclass SolveItCommand(sublime_plugin.TextCommand):\n \"\"\"\n Submit solution from sublime by getting contest ID and problem ID\n from the user\n \"\"\"\n\n def run(self, _):\n window = self.view.window()\n window.show_input_panel('Enter ContestID & ProblemID : ', '', self.\n on_done, self.on_change, self.on_cancel)\n\n def on_done(self, input_data):\n process(input_data)\n\n def on_change(self, input_data):\n pass\n\n def on_cancel(self):\n pass\n",
"step-4": "from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport sublime\nimport sublime_plugin\n<mask token>\n\n\ndef process(string):\n filename = sublime.active_window().active_view().file_name()\n contestid, problem = string.strip().split()\n executor_url = '127.0.0.1:9222'\n url = 'codeforces.com/contest/' + contestid + '/problem/' + problem\n _chrome_options = Options()\n _chrome_options.add_argument('disable-infobars')\n _chrome_options.add_argument('--start-maximized')\n _chrome_options.add_experimental_option('debuggerAddress', executor_url)\n try:\n driver = webdriver.Chrome(options=_chrome_options)\n driver.implicitly_wait(30)\n try:\n driver.get('http://' + url.rstrip())\n driver.find_element_by_name('sourceFile')\n driver.find_element_by_css_selector('input[type=\"file\"]').clear()\n driver.find_element_by_css_selector('input[type=\"file\"]'\n ).send_keys(filename.rstrip())\n driver.find_element_by_class_name('submit').click()\n except Exception:\n sublime.error_message(\n 'Either Codeforces is too busy or File is Untitled.'\n )\n except Exception:\n sublime.error_message('Server is not active.')\n\n\nclass SolveItCommand(sublime_plugin.TextCommand):\n \"\"\"\n Submit solution from sublime by getting contest ID and problem ID\n from the user\n \"\"\"\n\n def run(self, _):\n window = self.view.window()\n window.show_input_panel('Enter ContestID & ProblemID : ', '', self.\n on_done, self.on_change, self.on_cancel)\n\n def on_done(self, input_data):\n process(input_data)\n\n def on_change(self, input_data):\n pass\n\n def on_cancel(self):\n pass\n",
"step-5": "from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\nimport sublime\nimport sublime_plugin\n\n\"\"\"\nCopy and Paste selinium module and urllib3 module of Python in\n\"sublime-text-3/Lib/Python3.3\" folder of sublime-text3\n\"\"\"\n\n\ndef process(string):\n\n # Get active file name\n filename = sublime.active_window().active_view().file_name()\n contestid, problem = string.strip().split()\n\n # Change executor_url according to your preference\n executor_url = \"127.0.0.1:9222\" # change 9222 to the port you have used.\n url = \"codeforces.com/contest/\" + contestid + \"/problem/\" + problem\n\n _chrome_options = Options()\n _chrome_options.add_argument('disable-infobars')\n _chrome_options.add_argument(\"--start-maximized\")\n _chrome_options.add_experimental_option(\"debuggerAddress\", executor_url)\n try:\n driver = webdriver.Chrome(options=_chrome_options)\n driver.implicitly_wait(30)\n\n try:\n driver.get(\"http://\" + url.rstrip())\n driver.find_element_by_name(\"sourceFile\")\n driver.find_element_by_css_selector('input[type=\"file\"]').clear()\n # Send File to Codeforces\n driver.find_element_by_css_selector(\n 'input[type=\"file\"]').send_keys(filename.rstrip())\n # Click on submit button\n driver.find_element_by_class_name(\"submit\").click()\n except Exception:\n # In case Codeforces is too busy or File is untitled.\n sublime.error_message('Either Codeforces is too busy or \\\n File is Untitled.')\n except Exception:\n # In case Server is not active.\n sublime.error_message('Server is not active.')\n\n\nclass SolveItCommand(sublime_plugin.TextCommand):\n \"\"\"\n Submit solution from sublime by getting contest ID and problem ID\n from the user\n \"\"\"\n def run(self, _):\n window = self.view.window()\n # Input Panel to get Contest ID and Problem ID from the user\n window.show_input_panel(\n \"Enter ContestID & ProblemID : \",\n \"\",\n self.on_done,\n self.on_change,\n self.on_cancel)\n\n def on_done(self, input_data):\n process(input_data)\n\n def on_change(self, input_data):\n pass\n\n def on_cancel(self):\n pass\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('商品编码.txt', 'rt') as f:
data = f.read()
<|reserved_special_token_0|>
for x in data:
if count < 3:
count += 1
continue
x = x.split(',')
column = 0
for e in x:
if row == 0 and column == 0:
e = e[3:]
worksheet.write(row, column, e)
column += 1
row += 1
workbook.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
workbook = xlsxwriter.Workbook('商品编码.xlsx')
worksheet = workbook.add_worksheet()
with open('商品编码.txt', 'rt') as f:
data = f.read()
data = data.splitlines(True)
count = 1
row = 0
for x in data:
if count < 3:
count += 1
continue
x = x.split(',')
column = 0
for e in x:
if row == 0 and column == 0:
e = e[3:]
worksheet.write(row, column, e)
column += 1
row += 1
workbook.close()
<|reserved_special_token_1|>
import xlsxwriter
workbook = xlsxwriter.Workbook('商品编码.xlsx')
worksheet = workbook.add_worksheet()
with open('商品编码.txt', 'rt') as f:
data = f.read()
data = data.splitlines(True)
count = 1
row = 0
for x in data:
if count < 3:
count += 1
continue
x = x.split(',')
column = 0
for e in x:
if row == 0 and column == 0:
            e = e[3:]  # drop the first 3 chars of the header cell (presumably a BOM or file marker)
worksheet.write(row, column, e)
column += 1
row += 1
workbook.close()
|
flexible
|
{
"blob_id": "59a8a4cf4b04a191bfb70fd07668141dbfeda790",
"index": 6822,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('商品编码.txt', 'rt') as f:\n data = f.read()\n<mask token>\nfor x in data:\n if count < 3:\n count += 1\n continue\n x = x.split(',')\n column = 0\n for e in x:\n if row == 0 and column == 0:\n e = e[3:]\n worksheet.write(row, column, e)\n column += 1\n row += 1\nworkbook.close()\n",
"step-3": "<mask token>\nworkbook = xlsxwriter.Workbook('商品编码.xlsx')\nworksheet = workbook.add_worksheet()\nwith open('商品编码.txt', 'rt') as f:\n data = f.read()\ndata = data.splitlines(True)\ncount = 1\nrow = 0\nfor x in data:\n if count < 3:\n count += 1\n continue\n x = x.split(',')\n column = 0\n for e in x:\n if row == 0 and column == 0:\n e = e[3:]\n worksheet.write(row, column, e)\n column += 1\n row += 1\nworkbook.close()\n",
"step-4": "import xlsxwriter\nworkbook = xlsxwriter.Workbook('商品编码.xlsx')\nworksheet = workbook.add_worksheet()\nwith open('商品编码.txt', 'rt') as f:\n data = f.read()\ndata = data.splitlines(True)\ncount = 1\nrow = 0\nfor x in data:\n if count < 3:\n count += 1\n continue\n x = x.split(',')\n column = 0\n for e in x:\n if row == 0 and column == 0:\n e = e[3:]\n worksheet.write(row, column, e)\n column += 1\n row += 1\nworkbook.close()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Solution:
def projectionArea(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
res = 0
for i in grid:
res += max(i)
for j in i:
if j:
res += 1
for k in zip(*grid):
res += max(k)
return res
|
normal
|
{
"blob_id": "62fc71e26ba3788513e5e52efc5f20453080837d",
"index": 8514,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def projectionArea(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n res = 0\n for i in grid:\n res += max(i)\n for j in i:\n if j:\n res += 1\n for k in zip(*grid):\n res += max(k)\n return res\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# coding=utf-8
import base64
from sandcrawler.scraper import ScraperBase, SimpleScraperBase
class Hdmovie14Ag(SimpleScraperBase):
BASE_URL = 'http://www1.solarmovie.net'
OTHER_URLS = ['http://solarmovie.net', 'http://hdmovie14.ag']
SCRAPER_TYPES = [ ScraperBase.SCRAPER_TYPE_OSP, ]
LANGUAGE = 'eng'
MEDIA_TYPES = [ ScraperBase.MEDIA_TYPE_FILM, ScraperBase.MEDIA_TYPE_TV, ]
URL_TYPES = [ScraperBase.URL_TYPE_SEARCH, ScraperBase.URL_TYPE_LISTING, ]
def _fetch_search_url(self, search_term, media_type):
return '{base_url}/search-movies/{search_term}.html'.format(base_url=self.BASE_URL, search_term=search_term)
def _fetch_no_results_text(self):
return None
def _fetch_next_button(self, soup):
next_button = soup.find('a', text=u'»')
if next_button:
return next_button.href
return None
def _parse_search_result_page(self, soup):
found=0
for result in soup.select('div.ml-item'):
link = result.select_one('a')
self.submit_search_result(
link_url=link.href,
link_title=link.text,
image=self.util.find_image_src_or_none(result, 'img'),
)
found=1
if not found:
return self.submit_search_no_results()
def _parse_parse_page(self, soup):
index_page_title = self.util.get_page_title(soup)
series_season = series_episode = None
title = soup.select_one('h1')
if title and title.text:
series_season, series_episode = self.util.extract_season_episode(title.text)
for results in soup.select('div.server_line'):
try:
movie_link = self.make_soup(base64.decodestring(self.get_soup(results.select_one('a').href).
select_one('div#media-player script').text.split('("')[-1].
split('")')[0])).select_one('iframe')['src']
except AttributeError:
movie_link = self.get_soup(results.select_one('a').href).select_one('div#media-player a')['href']
self.submit_parse_result(
index_page_title=index_page_title,
link_url=movie_link,
link_title=movie_link,
series_season=series_season,
series_episode=series_episode,
)
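
# Compatibility note: base64.decodestring() was deprecated in favour of
# base64.decodebytes() and removed in Python 3.9, so on modern interpreters
# the decode call above needs the newer name (and a bytes argument).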
|
normal
|
{
"blob_id": "27a12a0f5ea6120036b66ee1cdd903da868a037f",
"index": 952,
"step-1": "<mask token>\n\n\nclass Hdmovie14Ag(SimpleScraperBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _fetch_search_url(self, search_term, media_type):\n return '{base_url}/search-movies/{search_term}.html'.format(base_url\n =self.BASE_URL, search_term=search_term)\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n <mask token>\n\n def _parse_parse_page(self, soup):\n index_page_title = self.util.get_page_title(soup)\n series_season = series_episode = None\n title = soup.select_one('h1')\n if title and title.text:\n series_season, series_episode = self.util.extract_season_episode(\n title.text)\n for results in soup.select('div.server_line'):\n try:\n movie_link = self.make_soup(base64.decodestring(self.\n get_soup(results.select_one('a').href).select_one(\n 'div#media-player script').text.split('(\"')[-1].split(\n '\")')[0])).select_one('iframe')['src']\n except AttributeError:\n movie_link = self.get_soup(results.select_one('a').href\n ).select_one('div#media-player a')['href']\n self.submit_parse_result(index_page_title=index_page_title,\n link_url=movie_link, link_title=movie_link, series_season=\n series_season, series_episode=series_episode)\n",
"step-2": "<mask token>\n\n\nclass Hdmovie14Ag(SimpleScraperBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _fetch_search_url(self, search_term, media_type):\n return '{base_url}/search-movies/{search_term}.html'.format(base_url\n =self.BASE_URL, search_term=search_term)\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n\n def _parse_search_result_page(self, soup):\n found = 0\n for result in soup.select('div.ml-item'):\n link = result.select_one('a')\n self.submit_search_result(link_url=link.href, link_title=link.\n text, image=self.util.find_image_src_or_none(result, 'img'))\n found = 1\n if not found:\n return self.submit_search_no_results()\n\n def _parse_parse_page(self, soup):\n index_page_title = self.util.get_page_title(soup)\n series_season = series_episode = None\n title = soup.select_one('h1')\n if title and title.text:\n series_season, series_episode = self.util.extract_season_episode(\n title.text)\n for results in soup.select('div.server_line'):\n try:\n movie_link = self.make_soup(base64.decodestring(self.\n get_soup(results.select_one('a').href).select_one(\n 'div#media-player script').text.split('(\"')[-1].split(\n '\")')[0])).select_one('iframe')['src']\n except AttributeError:\n movie_link = self.get_soup(results.select_one('a').href\n ).select_one('div#media-player a')['href']\n self.submit_parse_result(index_page_title=index_page_title,\n link_url=movie_link, link_title=movie_link, series_season=\n series_season, series_episode=series_episode)\n",
"step-3": "<mask token>\n\n\nclass Hdmovie14Ag(SimpleScraperBase):\n BASE_URL = 'http://www1.solarmovie.net'\n OTHER_URLS = ['http://solarmovie.net', 'http://hdmovie14.ag']\n SCRAPER_TYPES = [ScraperBase.SCRAPER_TYPE_OSP]\n LANGUAGE = 'eng'\n MEDIA_TYPES = [ScraperBase.MEDIA_TYPE_FILM, ScraperBase.MEDIA_TYPE_TV]\n URL_TYPES = [ScraperBase.URL_TYPE_SEARCH, ScraperBase.URL_TYPE_LISTING]\n\n def _fetch_search_url(self, search_term, media_type):\n return '{base_url}/search-movies/{search_term}.html'.format(base_url\n =self.BASE_URL, search_term=search_term)\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n\n def _parse_search_result_page(self, soup):\n found = 0\n for result in soup.select('div.ml-item'):\n link = result.select_one('a')\n self.submit_search_result(link_url=link.href, link_title=link.\n text, image=self.util.find_image_src_or_none(result, 'img'))\n found = 1\n if not found:\n return self.submit_search_no_results()\n\n def _parse_parse_page(self, soup):\n index_page_title = self.util.get_page_title(soup)\n series_season = series_episode = None\n title = soup.select_one('h1')\n if title and title.text:\n series_season, series_episode = self.util.extract_season_episode(\n title.text)\n for results in soup.select('div.server_line'):\n try:\n movie_link = self.make_soup(base64.decodestring(self.\n get_soup(results.select_one('a').href).select_one(\n 'div#media-player script').text.split('(\"')[-1].split(\n '\")')[0])).select_one('iframe')['src']\n except AttributeError:\n movie_link = self.get_soup(results.select_one('a').href\n ).select_one('div#media-player a')['href']\n self.submit_parse_result(index_page_title=index_page_title,\n link_url=movie_link, link_title=movie_link, series_season=\n series_season, series_episode=series_episode)\n",
"step-4": "import base64\nfrom sandcrawler.scraper import ScraperBase, SimpleScraperBase\n\n\nclass Hdmovie14Ag(SimpleScraperBase):\n BASE_URL = 'http://www1.solarmovie.net'\n OTHER_URLS = ['http://solarmovie.net', 'http://hdmovie14.ag']\n SCRAPER_TYPES = [ScraperBase.SCRAPER_TYPE_OSP]\n LANGUAGE = 'eng'\n MEDIA_TYPES = [ScraperBase.MEDIA_TYPE_FILM, ScraperBase.MEDIA_TYPE_TV]\n URL_TYPES = [ScraperBase.URL_TYPE_SEARCH, ScraperBase.URL_TYPE_LISTING]\n\n def _fetch_search_url(self, search_term, media_type):\n return '{base_url}/search-movies/{search_term}.html'.format(base_url\n =self.BASE_URL, search_term=search_term)\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n\n def _parse_search_result_page(self, soup):\n found = 0\n for result in soup.select('div.ml-item'):\n link = result.select_one('a')\n self.submit_search_result(link_url=link.href, link_title=link.\n text, image=self.util.find_image_src_or_none(result, 'img'))\n found = 1\n if not found:\n return self.submit_search_no_results()\n\n def _parse_parse_page(self, soup):\n index_page_title = self.util.get_page_title(soup)\n series_season = series_episode = None\n title = soup.select_one('h1')\n if title and title.text:\n series_season, series_episode = self.util.extract_season_episode(\n title.text)\n for results in soup.select('div.server_line'):\n try:\n movie_link = self.make_soup(base64.decodestring(self.\n get_soup(results.select_one('a').href).select_one(\n 'div#media-player script').text.split('(\"')[-1].split(\n '\")')[0])).select_one('iframe')['src']\n except AttributeError:\n movie_link = self.get_soup(results.select_one('a').href\n ).select_one('div#media-player a')['href']\n self.submit_parse_result(index_page_title=index_page_title,\n link_url=movie_link, link_title=movie_link, series_season=\n series_season, series_episode=series_episode)\n",
"step-5": "# coding=utf-8\nimport base64\nfrom sandcrawler.scraper import ScraperBase, SimpleScraperBase\n\nclass Hdmovie14Ag(SimpleScraperBase):\n BASE_URL = 'http://www1.solarmovie.net'\n OTHER_URLS = ['http://solarmovie.net', 'http://hdmovie14.ag']\n SCRAPER_TYPES = [ ScraperBase.SCRAPER_TYPE_OSP, ]\n LANGUAGE = 'eng'\n MEDIA_TYPES = [ ScraperBase.MEDIA_TYPE_FILM, ScraperBase.MEDIA_TYPE_TV, ]\n\n URL_TYPES = [ScraperBase.URL_TYPE_SEARCH, ScraperBase.URL_TYPE_LISTING, ]\n\n def _fetch_search_url(self, search_term, media_type):\n return '{base_url}/search-movies/{search_term}.html'.format(base_url=self.BASE_URL, search_term=search_term)\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n\n def _parse_search_result_page(self, soup):\n found=0\n for result in soup.select('div.ml-item'):\n link = result.select_one('a')\n self.submit_search_result(\n link_url=link.href,\n link_title=link.text,\n image=self.util.find_image_src_or_none(result, 'img'),\n )\n found=1\n if not found:\n return self.submit_search_no_results()\n\n def _parse_parse_page(self, soup):\n index_page_title = self.util.get_page_title(soup)\n series_season = series_episode = None\n title = soup.select_one('h1')\n if title and title.text:\n series_season, series_episode = self.util.extract_season_episode(title.text)\n for results in soup.select('div.server_line'):\n try:\n movie_link = self.make_soup(base64.decodestring(self.get_soup(results.select_one('a').href).\n select_one('div#media-player script').text.split('(\"')[-1].\n split('\")')[0])).select_one('iframe')['src']\n except AttributeError:\n movie_link = self.get_soup(results.select_one('a').href).select_one('div#media-player a')['href']\n self.submit_parse_result(\n index_page_title=index_page_title,\n link_url=movie_link,\n link_title=movie_link,\n series_season=series_season,\n series_episode=series_episode,\n )\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for t in sorted(list(permutations(s, int(k)))):
print(*t, sep='')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s, space, k = raw_input().partition(' ')
for t in sorted(list(permutations(s, int(k)))):
print(*t, sep='')
<|reserved_special_token_1|>
from __future__ import print_function
from itertools import permutations
s, space, k = raw_input().partition(' ')
for t in sorted(list(permutations(s, int(k)))):
print(*t, sep='')
|
flexible
|
{
"blob_id": "37580939a0e58bdffb8cfad8252f339a7da4446e",
"index": 1130,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor t in sorted(list(permutations(s, int(k)))):\n print(*t, sep='')\n",
"step-3": "<mask token>\ns, space, k = raw_input().partition(' ')\nfor t in sorted(list(permutations(s, int(k)))):\n print(*t, sep='')\n",
"step-4": "from __future__ import print_function\nfrom itertools import permutations\ns, space, k = raw_input().partition(' ')\nfor t in sorted(list(permutations(s, int(k)))):\n print(*t, sep='')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if v1 > 18 and v1 < 60:
print(v1)
elif v2 > 18 and v2 < 60:
print(v2)
elif v3 > 18 and v3 < 60:
print(v3)
<|reserved_special_token_1|>
v1 = int(input('Introdu virsta primei persoane'))
v2 = int(input('Introdu virsta persoanei a doua'))
v3 = int(input('Introdu virsta persoanei a treia'))
if v1 > 18 and v1 < 60:
print(v1)
elif v2 > 18 and v2 < 60:
print(v2)
elif v3 > 18 and v3 < 60:
print(v3)
<|reserved_special_token_1|>
v1=int(input("Introdu virsta primei persoane"))
v2=int(input("Introdu virsta persoanei a doua"))
v3=int(input("Introdu virsta persoanei a treia"))
if ((v1>18)and(v1<60)):
print(v1)
elif((v2>18)and(v2<60)):
print(v2)
elif((v3>18)and(v3<60)):
print(v3)
|
flexible
|
{
"blob_id": "b8c749052af0061373808addea3ad419c35e1a29",
"index": 3324,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif v1 > 18 and v1 < 60:\n print(v1)\nelif v2 > 18 and v2 < 60:\n print(v2)\nelif v3 > 18 and v3 < 60:\n print(v3)\n",
"step-3": "v1 = int(input('Introdu virsta primei persoane'))\nv2 = int(input('Introdu virsta persoanei a doua'))\nv3 = int(input('Introdu virsta persoanei a treia'))\nif v1 > 18 and v1 < 60:\n print(v1)\nelif v2 > 18 and v2 < 60:\n print(v2)\nelif v3 > 18 and v3 < 60:\n print(v3)\n",
"step-4": "v1=int(input(\"Introdu virsta primei persoane\"))\r\nv2=int(input(\"Introdu virsta persoanei a doua\"))\r\nv3=int(input(\"Introdu virsta persoanei a treia\"))\r\nif ((v1>18)and(v1<60)):\r\n print(v1)\r\nelif((v2>18)and(v2<60)):\r\n print(v2)\r\nelif((v3>18)and(v3<60)):\r\n print(v3)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from typing import List, Tuple
from unittest import TestCase
from solutions.python.common.timing import decompose, parse_decomposed_duration, format_duration
class TestTiming(TestCase):
def test_decompose_ns(self):
# Given
duration: int = 234
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_us(self):
# Given
duration: int = 23456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_ms(self):
# Given
duration: int = 1023456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_s(self):
# Given
duration: int = 45001023456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_min(self):
# Given
duration: int = 65001023456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_h(self):
# Given
duration: int = 7995125885088
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'),
(125, 'ms'), (885, 'μs'), (88, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_parse_decomposed_duration_ns(self):
# Given
decomposition: List[Tuple[int, str]] = [(234, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '234 ns'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_us(self):
# Given
decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '23.456 μs'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_ms(self):
# Given
decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '1.023 ms'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_s(self):
# Given
decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '45.001 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_min(self):
# Given
decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '1 min 5 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_h(self):
# Given
decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '2 h 13 min'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_format_duration_h(self):
# Given
duration_ns: int = 7995125885088
# When
formatted_duration: str = format_duration(duration_ns)
# Then
expected_formatted_duration: str = '2 h 13 min'
self.assertEqual(expected_formatted_duration, formatted_duration)
def test_format_duration_us(self):
# Given
duration_ns: int = 23456
# When
formatted_duration: str = format_duration(duration_ns)
# Then
expected_formatted_duration: str = '23.456 μs'
self.assertEqual(expected_formatted_duration, formatted_duration)
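

# A minimal sketch of the decompose() exercised above (an assumption for
# illustration -- the real solutions.python.common.timing implementation is
# not part of this record):
def _decompose_sketch(duration_ns: int) -> List[Tuple[int, str]]:
    units = [('h', 3600 * 10 ** 9), ('min', 60 * 10 ** 9), ('s', 10 ** 9),
             ('ms', 10 ** 6), ('μs', 10 ** 3), ('ns', 1)]
    parts: List[Tuple[int, str]] = []
    for name, size in units:
        value, duration_ns = divmod(duration_ns, size)
        if value or parts:  # skip leading zero units, keep inner ones
            parts.append((value, name))
    return parts or [(0, 'ns')]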
|
normal
|
{
"blob_id": "afecbb46a98fbf6b5c26f5b6c8026aec035fadf1",
"index": 6696,
"step-1": "<mask token>\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n duration: int = 234\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n <mask token>\n\n def test_decompose_ms(self):\n duration: int = 1023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n <mask token>\n <mask token>\n <mask token>\n\n def test_parse_decomposed_duration_ns(self):\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n\n def test_parse_decomposed_duration_ms(self):\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,\n 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n <mask token>\n\n def test_parse_decomposed_duration_h(self):\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,\n 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n duration_ns: int = 7995125885088\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n duration: int = 234\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n <mask token>\n\n def test_decompose_ms(self):\n duration: int = 1023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_s(self):\n duration: int = 45001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_min(self):\n duration: int = 65001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5,\n 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_h(self):\n duration: int = 7995125885088\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13,\n 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_parse_decomposed_duration_ns(self):\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n\n def test_parse_decomposed_duration_ms(self):\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,\n 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_s(self):\n decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '45.001 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n\n def test_parse_decomposed_duration_h(self):\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,\n 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n duration_ns: int = 7995125885088\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n duration: int = 234\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n <mask token>\n\n def test_decompose_ms(self):\n duration: int = 1023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_s(self):\n duration: int = 45001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_min(self):\n duration: int = 65001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5,\n 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_h(self):\n duration: int = 7995125885088\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13,\n 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_parse_decomposed_duration_ns(self):\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n\n def test_parse_decomposed_duration_ms(self):\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,\n 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_s(self):\n decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '45.001 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_min(self):\n decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1 min 5 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_h(self):\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,\n 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n duration_ns: int = 7995125885088\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n <mask token>\n",
"step-4": "from typing import List, Tuple\nfrom unittest import TestCase\nfrom solutions.python.common.timing import decompose, parse_decomposed_duration, format_duration\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n duration: int = 234\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_us(self):\n duration: int = 23456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456,\n 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_ms(self):\n duration: int = 1023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_s(self):\n duration: int = 45001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_min(self):\n duration: int = 65001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5,\n 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_h(self):\n duration: int = 7995125885088\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13,\n 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_parse_decomposed_duration_ns(self):\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_us(self):\n decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '23.456 μs'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_ms(self):\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,\n 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_s(self):\n decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '45.001 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_min(self):\n decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1 min 5 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_h(self):\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,\n 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n 
parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n duration_ns: int = 7995125885088\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n\n def test_format_duration_us(self):\n duration_ns: int = 23456\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '23.456 μs'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n",
"step-5": "from typing import List, Tuple\nfrom unittest import TestCase\n\nfrom solutions.python.common.timing import decompose, parse_decomposed_duration, format_duration\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n # Given\n duration: int = 234\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_us(self):\n # Given\n duration: int = 23456\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_ms(self):\n # Given\n duration: int = 1023456\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_s(self):\n # Given\n duration: int = 45001023456\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_min(self):\n # Given\n duration: int = 65001023456\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_h(self):\n # Given\n duration: int = 7995125885088\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'),\n (125, 'ms'), (885, 'μs'), (88, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_parse_decomposed_duration_ns(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_us(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '23.456 μs'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_ms(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_s(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '45.001 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_min(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n\n # When\n parsed_duration: str = 
parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '1 min 5 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_h(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n # Given\n duration_ns: int = 7995125885088\n\n # When\n formatted_duration: str = format_duration(duration_ns)\n\n # Then\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n\n def test_format_duration_us(self):\n # Given\n duration_ns: int = 23456\n\n # When\n formatted_duration: str = format_duration(duration_ns)\n\n # Then\n expected_formatted_duration: str = '23.456 μs'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n",
"step-ids": [
7,
11,
12,
16,
17
]
}
|
[
7,
11,
12,
16,
17
] |
import numpy
# calculate the field of a simple magnetic dipole
def dipole(x, y, z, dx, dy, dz, mx, my, mz):
R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2
return (3.0*(x - dx) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mx/R**1.5,
3.0*(y - dy) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - my/R**1.5,
3.0*(z - dz) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mz/R**1.5)
# calculate only one component of the dipole field
def dipoleX(x, y, z, dx, dy, dz, mx, my, mz):
R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2
return 3.0*(x - dx) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mx/R**1.5
def dipoleY(x, y, z, dx, dy, dz, mx, my, mz):
R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2
return 3.0*(y - dy) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - my/R**1.5
def dipoleZ(x, y, z, dx, dy, dz, mx, my, mz):
R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2
return 3.0*(z - dz) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mz/R**1.5
# calculate the field caused by a crack from arrays of coordinates and magnetization of crack parts
def crack(x,y,z,coordinates,magnetization):
ret = numpy.array([0.0]*3)
for it in range(len(coordinates)):
ret+=numpy.array(dipole(x,y,z,coordinates[it][0],coordinates[it][1],coordinates[it][2],magnetization[it][0],magnetization[it][1],magnetization[it][2]))
return ret
# generator of crack part coordinates and magnetization
def crackGenerator(funcCoord, funcMagn,crackLen = 30, paramBouns = [0,1]):
coordinates = []
magnetization = []
for t in numpy.arange(paramBouns[0],paramBouns[1],(paramBouns[1]-paramBouns[0])/crackLen):
coordinates.append(funcCoord(t))
magnetization.append(funcMagn(t))
return coordinates,magnetization
#generates one random crack in volume vol
def randomCrackExampleLinearModel(vol):
sizeMax = (vol[3]/5,vol[4]/5,vol[5]/5)
coordParams = numpy.random.rand(3,2)
return crackGenerator(lambda t:(coordParams[0][0]*vol[3]+vol[0]+t*coordParams[0][1]*sizeMax[0],
coordParams[1][0]*vol[4]+vol[1]+t*coordParams[1][1]*sizeMax[1],
coordParams[2][0]*vol[5]+vol[2]+t*coordParams[2][1]*sizeMax[2]),
lambda t: (0,0,10+numpy.random.rand()*t))
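
# Quick usage sketch (illustrative values, not part of the original module):
# sample the field of one randomly generated crack along a line above the
# volume vol = (x0, y0, z0, dx, dy, dz).
if __name__ == '__main__':
    vol = (0.0, 0.0, 0.0, 10.0, 10.0, 10.0)
    coords, magn = randomCrackExampleLinearModel(vol)
    for x in numpy.linspace(vol[0], vol[0] + vol[3], 5):
        print(x, crack(x, 5.0, 12.0, coords, magn))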
|
normal
|
{
"blob_id": "9d37d1618fb9d00d63b7ed58290c5ba1b8f106cd",
"index": 4599,
"step-1": "import numpy \n\n#calculate field of simple \ndef dipole(x, y, z, dx, dy, dz, mx, my, mz):\n R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2\n return (3.0*(x - dx) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mx/R**1.5,\n 3.0*(y - dy) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - my/R**1.5,\n 3.0*(z - dz) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mz/R**1.5)\n#calculaion only one component of dipole \ndef dipoleX(x, y, z, dx, dy, dz, mx, my, mz):\n R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2\n return 3.0*(x - dx) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mx/R**1.5\ndef dipoleY(x, y, z, dx, dy, dz, mx, my, mz):\n R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2\n return 3.0*(y - dy) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - my/R**1.5\ndef dipoleZ(x, y, z, dx, dy, dz, mx, my, mz):\n R = (x - dx)**2 + (y - dy)**2 + (z - dz)**2\n return 3.0*(z - dz) * ((x - dx)*mx + (y - dy)*my + (z - dz)*mz) / R**2.5 - mz/R**1.5\n\n#calculate field caused by crack from array of coordinates and magntization of crack parts\ndef crack(x,y,z,coordinates,magnetization):\n ret = numpy.array([0.0]*3)\n for it in range(len(coordinates)):\n ret+=numpy.array(dipole(x,y,z,coordinates[it][0],coordinates[it][1],coordinates[it][2],magnetization[it][0],magnetization[it][1],magnetization[it][2]))\n return ret\n\n#generator of crack parts coordinates and magntization \ndef crackGenerator(funcCoord, funcMagn,crackLen = 30, paramBouns = [0,1]):\n coordinates = []\n magnetization = []\n for t in numpy.arange(paramBouns[0],paramBouns[1],(paramBouns[1]-paramBouns[0])/crackLen):\n coordinates.append(funcCoord(t))\n magnetization.append(funcMagn(t))\n return coordinates,magnetization\n\n#generates one random crack in volume vol\ndef randomCrackExampleLinearModel(vol):\n sizeMax = (vol[3]/5,vol[4]/5,vol[5]/5)\n coordParams = numpy.random.rand(3,2)\n return crackGenerator(lambda t:(coordParams[0][0]*vol[3]+vol[0]+t*coordParams[0][1]*sizeMax[0],\n coordParams[1][0]*vol[4]+vol[1]+t*coordParams[1][1]*sizeMax[1],\n coordParams[2][0]*vol[5]+vol[2]+t*coordParams[2][1]*sizeMax[2]),\n lambda t: (0,0,10+numpy.random.rand()*t))",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from typing import Any, Dict, List
import numpy as np
from kedro.io import AbstractDataSet
from msrest.exceptions import HttpOperationError
from azureml.core import Workspace, Datastore
from azureml.data.data_reference import DataReference
class AZblob_datastore_data(AbstractDataSet):
"""``ImageDataSet`` loads / save image data from a given filepath as `numpy` array using Pillow.
Example:
::
>>> ImageDataSet(filepath='/img/file/path.png')
"""
def __init__(self,
container_path: str,
local_path : str,
credentials: Dict[str, Any] = None):
"""Creates a new instance of ImageDataSet to load / save image data at the given filepath.
Args:
filepath: The location of the image file to load / save data.
"""
self._container_path = container_path
self._local_path = local_path
self._credentials = credentials
def _load(self) -> np.ndarray:
"""Loads data from the image file.
Returns:
Data from the image file as a numpy array.
"""
        # Initialise the AzureML workspace from config
ws = Workspace.from_config()
blob_datastore_name = self._credentials['storage_name']
account_name = self._credentials['storage_name'] # Storage account name
container_name = self._credentials['container_name'] # Name of Azure blob container
account_key = self._credentials['key'] # Storage account key
# Register a new datastore
try:
            blob_datastore = Datastore.get(ws, blob_datastore_name)
print("Found Blob Datastore with name: %s" % blob_datastore_name)
except HttpOperationError:
            blob_datastore = Datastore.register_azure_blob_container(
                workspace=ws,
                datastore_name=blob_datastore_name,
                container_name=container_name,
                account_name=account_name,
                account_key=account_key)

        blob_datastore.download(target_path=self._local_path,
                                prefix=self._container_path,
                                show_progress=False)
...
def _save(self, data: np.ndarray) -> None:
"""Saves image data to the specified filepath"""
...
def _describe(self) -> Dict[str, Any]:
"""Returns a dict that describes the attributes of the dataset"""
|
normal
|
{
"blob_id": "eb981a2d7f0ff5e6cc4a4a76f269c93c547965ba",
"index": 715,
"step-1": "from typing import Any, Dict, List\n\nimport numpy as np\n\nfrom kedro.io import AbstractDataSet\nfrom msrest.exceptions import HttpOperationError\nfrom azureml.core import Workspace, Datastore\nfrom azureml.data.data_reference import DataReference\n\nclass AZblob_datastore_data(AbstractDataSet):\n \"\"\"``ImageDataSet`` loads / save image data from a given filepath as `numpy` array using Pillow.\n\n Example:\n ::\n\n >>> ImageDataSet(filepath='/img/file/path.png')\n \"\"\"\n\n def __init__(self,\n container_path: str,\n local_path : str,\n credentials: Dict[str, Any] = None):\n \"\"\"Creates a new instance of ImageDataSet to load / save image data at the given filepath.\n\n Args:\n filepath: The location of the image file to load / save data.\n \"\"\"\n self._container_path = container_path\n self._local_path = local_path\n self._credentials = credentials\n\n def _load(self) -> np.ndarray:\n \"\"\"Loads data from the image file.\n\n Returns:\n Data from the image file as a numpy array.\n \"\"\"\n # Initialis Workspace\n\n ws = Workspace.from_config()\n\n blob_datastore_name = self._credentials['storage_name']\n account_name = self._credentials['storage_name'] # Storage account name\n container_name = self._credentials['container_name'] # Name of Azure blob container\n account_key = self._credentials['key'] # Storage account key\n\n # Register a new datastore\n try:\n blob_datastore = blob_datastore = Datastore.get(ws, blob_datastore_name)\n print(\"Found Blob Datastore with name: %s\" % blob_datastore_name)\n\n except HttpOperationError:\n blob_datastore = Datastore.register_azure_blob_container(workspace = ws, \n datastore_name = blob_datastore_name, \n container_name = container_name,\n account_name = account_name,\n blob_datastore.download(target_path=self._local_path,\n prefix=self._container_path,\n show_progress=False) \n ...\n\n def _save(self, data: np.ndarray) -> None:\n \"\"\"Saves image data to the specified filepath\"\"\"\n ...\n\n def _describe(self) -> Dict[str, Any]:\n \n \"\"\"Returns a dict that describes the attributes of the dataset\"\"\"",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#! /usr/bin/env python
import RPIO
import sys
RPIO.setwarnings(False)
gpio = int(sys.argv[1])
RPIO.setup(gpio, RPIO.OUT)
input_value = RPIO.input(gpio)  # read back the pin's current state
print(input_value)
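
# Example invocation (script name hypothetical); prints 0 or 1 for BCM pin 17:
#   python read_gpio.py 17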
|
normal
|
{
"blob_id": "382597628b999f2984dba09405d9ff3dd2f35872",
"index": 6765,
"step-1": "#! /usr/bin/env python\n\nimport RPIO\nimport sys\n\nRPIO.setwarnings(False)\n\ngpio = int(sys.argv[1])\n\nRPIO.setup(gpio, RPIO.OUT)\ninput_value = RPIO.input(gpio)\n\nprint input_value",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2008 Rok Garbas <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
__docformat__ = "reStructuredText"
import z3c.form
import zope.schema
import zope.interface
import zope.component
from widget_date import DateWidget
from interfaces import IMonthYearWidget
class MonthYearWidget(DateWidget):
""" Month and year widget """
zope.interface.implementsOnly(IMonthYearWidget)
klass = u'monthyear-widget'
value = ('', '', 1)
@zope.component.adapter(zope.schema.interfaces.IField, z3c.form.interfaces.IFormLayer)
@zope.interface.implementer(z3c.form.interfaces.IFieldWidget)
def MonthYearFieldWidget(field, request):
"""IFieldWidget factory for MonthYearWidget."""
return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))
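
# Registration sketch: the @adapter/@implementer decorators above carry the
# adapter information, so a ZCML line like the following (dotted path
# hypothetical) is the usual wiring:
#
#   <adapter factory="mypackage.widget_monthyear.MonthYearFieldWidget" />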
|
normal
|
{
"blob_id": "d0f9dd0a06023dd844b0bf70dff360f6bb46c152",
"index": 4412,
"step-1": "<mask token>\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n zope.interface.implementsOnly(IMonthYearWidget)\n klass = u'monthyear-widget'\n value = '', '', 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n zope.interface.implementsOnly(IMonthYearWidget)\n klass = u'monthyear-widget'\n value = '', '', 1\n\n\[email protected](zope.schema.interfaces.IField, z3c.form.interfaces.\n IFormLayer)\[email protected](z3c.form.interfaces.IFieldWidget)\ndef MonthYearFieldWidget(field, request):\n \"\"\"IFieldWidget factory for MonthYearWidget.\"\"\"\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))\n",
"step-3": "__docformat__ = 'reStructuredText'\n<mask token>\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n zope.interface.implementsOnly(IMonthYearWidget)\n klass = u'monthyear-widget'\n value = '', '', 1\n\n\[email protected](zope.schema.interfaces.IField, z3c.form.interfaces.\n IFormLayer)\[email protected](z3c.form.interfaces.IFieldWidget)\ndef MonthYearFieldWidget(field, request):\n \"\"\"IFieldWidget factory for MonthYearWidget.\"\"\"\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))\n",
"step-4": "__docformat__ = 'reStructuredText'\nimport z3c.form\nimport zope.schema\nimport zope.interface\nimport zope.component\nfrom widget_date import DateWidget\nfrom interfaces import IMonthYearWidget\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n zope.interface.implementsOnly(IMonthYearWidget)\n klass = u'monthyear-widget'\n value = '', '', 1\n\n\[email protected](zope.schema.interfaces.IField, z3c.form.interfaces.\n IFormLayer)\[email protected](z3c.form.interfaces.IFieldWidget)\ndef MonthYearFieldWidget(field, request):\n \"\"\"IFieldWidget factory for MonthYearWidget.\"\"\"\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))\n",
"step-5": "#-*- coding: utf-8 -*- \n\n#############################################################################\n# #\n# Copyright (c) 2008 Rok Garbas <[email protected]> #\n# #\n# This program is free software; you can redistribute it and/or modify #\n# it under the terms of the GNU General Public License as published by #\n# the Free Software Foundation; either version 3 of the License, or #\n# (at your option) any later version. #\n# #\n# This program is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n# GNU General Public License for more details. #\n# #\n# You should have received a copy of the GNU General Public License #\n# along with this program. If not, see <http://www.gnu.org/licenses/>. #\n# #\n#############################################################################\n__docformat__ = \"reStructuredText\"\n\nimport z3c.form\nimport zope.schema\nimport zope.interface\nimport zope.component\nfrom widget_date import DateWidget\nfrom interfaces import IMonthYearWidget\n\n\nclass MonthYearWidget(DateWidget):\n \"\"\" Month and year widget \"\"\"\n\n zope.interface.implementsOnly(IMonthYearWidget)\n\n klass = u'monthyear-widget'\n value = ('', '', 1)\n\[email protected](zope.schema.interfaces.IField, z3c.form.interfaces.IFormLayer)\[email protected](z3c.form.interfaces.IFieldWidget)\ndef MonthYearFieldWidget(field, request):\n \"\"\"IFieldWidget factory for MonthYearWidget.\"\"\"\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))\n\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class Pygments(Directive):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
lexer = TextLexer()
if 'linenos' in self.options and self.options['linenos'] not in (
'table', 'inline'):
if self.options['linenos'] == 'none':
self.options.pop('linenos')
else:
self.options['linenos'] = 'table'
for flag in ('nowrap', 'nobackground', 'anchorlinenos'):
if flag in self.options:
self.options[flag] = True
formatter = HtmlFormatter(noclasses=False, **self.options)
parsed = highlight('\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Pygments(Directive):
""" Source code syntax hightlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'anchorlinenos': directives.flag, 'classprefix':
directives.unchanged, 'hl_lines': directives.unchanged,
'lineanchors': directives.unchanged, 'linenos': directives.
unchanged, 'linenospecial': directives.nonnegative_int,
'linenostart': directives.nonnegative_int, 'linenostep': directives
.nonnegative_int, 'lineseparator': directives.unchanged,
'linespans': directives.unchanged, 'nobackground': directives.flag,
'nowrap': directives.flag, 'tagsfile': directives.unchanged,
'tagurlformat': directives.unchanged}
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
lexer = TextLexer()
if 'linenos' in self.options and self.options['linenos'] not in (
'table', 'inline'):
if self.options['linenos'] == 'none':
self.options.pop('linenos')
else:
self.options['linenos'] = 'table'
for flag in ('nowrap', 'nobackground', 'anchorlinenos'):
if flag in self.options:
self.options[flag] = True
formatter = HtmlFormatter(noclasses=False, **self.options)
parsed = highlight('\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Pygments(Directive):
""" Source code syntax hightlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'anchorlinenos': directives.flag, 'classprefix':
directives.unchanged, 'hl_lines': directives.unchanged,
'lineanchors': directives.unchanged, 'linenos': directives.
unchanged, 'linenospecial': directives.nonnegative_int,
'linenostart': directives.nonnegative_int, 'linenostep': directives
.nonnegative_int, 'lineseparator': directives.unchanged,
'linespans': directives.unchanged, 'nobackground': directives.flag,
'nowrap': directives.flag, 'tagsfile': directives.unchanged,
'tagurlformat': directives.unchanged}
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
lexer = TextLexer()
if 'linenos' in self.options and self.options['linenos'] not in (
'table', 'inline'):
if self.options['linenos'] == 'none':
self.options.pop('linenos')
else:
self.options['linenos'] = 'table'
for flag in ('nowrap', 'nobackground', 'anchorlinenos'):
if flag in self.options:
self.options[flag] = True
formatter = HtmlFormatter(noclasses=False, **self.options)
parsed = highlight('\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
def register():
directives.register_directive('code-block', Pygments)
directives.register_directive('sourcecode', Pygments)
<|reserved_special_token_1|>
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.lexers.special import TextLexer
from pygments.formatters.html import HtmlFormatter
class Pygments(Directive):
""" Source code syntax hightlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'anchorlinenos': directives.flag, 'classprefix':
directives.unchanged, 'hl_lines': directives.unchanged,
'lineanchors': directives.unchanged, 'linenos': directives.
unchanged, 'linenospecial': directives.nonnegative_int,
'linenostart': directives.nonnegative_int, 'linenostep': directives
.nonnegative_int, 'lineseparator': directives.unchanged,
'linespans': directives.unchanged, 'nobackground': directives.flag,
'nowrap': directives.flag, 'tagsfile': directives.unchanged,
'tagurlformat': directives.unchanged}
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
lexer = TextLexer()
if 'linenos' in self.options and self.options['linenos'] not in (
'table', 'inline'):
if self.options['linenos'] == 'none':
self.options.pop('linenos')
else:
self.options['linenos'] = 'table'
for flag in ('nowrap', 'nobackground', 'anchorlinenos'):
if flag in self.options:
self.options[flag] = True
formatter = HtmlFormatter(noclasses=False, **self.options)
parsed = highlight('\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
def register():
directives.register_directive('code-block', Pygments)
directives.register_directive('sourcecode', Pygments)
<|reserved_special_token_1|>
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.lexers.special import TextLexer
from pygments.formatters.html import HtmlFormatter
class Pygments(Directive):
""" Source code syntax hightlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'anchorlinenos': directives.flag,
'classprefix': directives.unchanged,
'hl_lines': directives.unchanged,
'lineanchors': directives.unchanged,
'linenos': directives.unchanged,
'linenospecial': directives.nonnegative_int,
'linenostart': directives.nonnegative_int,
'linenostep': directives.nonnegative_int,
'lineseparator': directives.unchanged,
'linespans': directives.unchanged,
'nobackground': directives.flag,
'nowrap': directives.flag,
'tagsfile': directives.unchanged,
'tagurlformat': directives.unchanged,
}
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
if 'linenos' in self.options and self.options['linenos'] not in ('table', 'inline'):
if self.options['linenos'] == 'none':
self.options.pop('linenos')
else:
self.options['linenos'] = 'table'
for flag in ('nowrap', 'nobackground', 'anchorlinenos'):
if flag in self.options:
self.options[flag] = True
# noclasses should already default to False, but just in case...
formatter = HtmlFormatter(noclasses=False, **self.options)
parsed = highlight('\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
def register():
directives.register_directive('code-block', Pygments)
directives.register_directive('sourcecode', Pygments)
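
# A usage sketch: once register() runs inside a docutils/Pelican build, reST
# documents can invoke the directive (the lexer name and option shown are
# assumptions):
#
#   .. code-block:: python
#      :linenos: table
#
#      print("highlighted")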
|
flexible
|
{
"blob_id": "d3dcef6a1a6bcfc1161c4de46081703b8fe7016d",
"index": 9606,
"step-1": "<mask token>\n\n\nclass Pygments(Directive):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n lexer = TextLexer()\n if 'linenos' in self.options and self.options['linenos'] not in (\n 'table', 'inline'):\n if self.options['linenos'] == 'none':\n self.options.pop('linenos')\n else:\n self.options['linenos'] = 'table'\n for flag in ('nowrap', 'nobackground', 'anchorlinenos'):\n if flag in self.options:\n self.options[flag] = True\n formatter = HtmlFormatter(noclasses=False, **self.options)\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Pygments(Directive):\n \"\"\" Source code syntax hightlighting.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = {'anchorlinenos': directives.flag, 'classprefix':\n directives.unchanged, 'hl_lines': directives.unchanged,\n 'lineanchors': directives.unchanged, 'linenos': directives.\n unchanged, 'linenospecial': directives.nonnegative_int,\n 'linenostart': directives.nonnegative_int, 'linenostep': directives\n .nonnegative_int, 'lineseparator': directives.unchanged,\n 'linespans': directives.unchanged, 'nobackground': directives.flag,\n 'nowrap': directives.flag, 'tagsfile': directives.unchanged,\n 'tagurlformat': directives.unchanged}\n has_content = True\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n lexer = TextLexer()\n if 'linenos' in self.options and self.options['linenos'] not in (\n 'table', 'inline'):\n if self.options['linenos'] == 'none':\n self.options.pop('linenos')\n else:\n self.options['linenos'] = 'table'\n for flag in ('nowrap', 'nobackground', 'anchorlinenos'):\n if flag in self.options:\n self.options[flag] = True\n formatter = HtmlFormatter(noclasses=False, **self.options)\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Pygments(Directive):\n \"\"\" Source code syntax hightlighting.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = {'anchorlinenos': directives.flag, 'classprefix':\n directives.unchanged, 'hl_lines': directives.unchanged,\n 'lineanchors': directives.unchanged, 'linenos': directives.\n unchanged, 'linenospecial': directives.nonnegative_int,\n 'linenostart': directives.nonnegative_int, 'linenostep': directives\n .nonnegative_int, 'lineseparator': directives.unchanged,\n 'linespans': directives.unchanged, 'nobackground': directives.flag,\n 'nowrap': directives.flag, 'tagsfile': directives.unchanged,\n 'tagurlformat': directives.unchanged}\n has_content = True\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n lexer = TextLexer()\n if 'linenos' in self.options and self.options['linenos'] not in (\n 'table', 'inline'):\n if self.options['linenos'] == 'none':\n self.options.pop('linenos')\n else:\n self.options['linenos'] = 'table'\n for flag in ('nowrap', 'nobackground', 'anchorlinenos'):\n if flag in self.options:\n self.options[flag] = True\n formatter = HtmlFormatter(noclasses=False, **self.options)\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\n\ndef register():\n directives.register_directive('code-block', Pygments)\n directives.register_directive('sourcecode', Pygments)\n",
"step-4": "from docutils import nodes\nfrom docutils.parsers.rst import directives, Directive\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name\nfrom pygments.lexers.special import TextLexer\nfrom pygments.formatters.html import HtmlFormatter\n\n\nclass Pygments(Directive):\n \"\"\" Source code syntax hightlighting.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = {'anchorlinenos': directives.flag, 'classprefix':\n directives.unchanged, 'hl_lines': directives.unchanged,\n 'lineanchors': directives.unchanged, 'linenos': directives.\n unchanged, 'linenospecial': directives.nonnegative_int,\n 'linenostart': directives.nonnegative_int, 'linenostep': directives\n .nonnegative_int, 'lineseparator': directives.unchanged,\n 'linespans': directives.unchanged, 'nobackground': directives.flag,\n 'nowrap': directives.flag, 'tagsfile': directives.unchanged,\n 'tagurlformat': directives.unchanged}\n has_content = True\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n lexer = TextLexer()\n if 'linenos' in self.options and self.options['linenos'] not in (\n 'table', 'inline'):\n if self.options['linenos'] == 'none':\n self.options.pop('linenos')\n else:\n self.options['linenos'] = 'table'\n for flag in ('nowrap', 'nobackground', 'anchorlinenos'):\n if flag in self.options:\n self.options[flag] = True\n formatter = HtmlFormatter(noclasses=False, **self.options)\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\n\ndef register():\n directives.register_directive('code-block', Pygments)\n directives.register_directive('sourcecode', Pygments)\n",
"step-5": "from docutils import nodes\nfrom docutils.parsers.rst import directives, Directive\n\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name\nfrom pygments.lexers.special import TextLexer\nfrom pygments.formatters.html import HtmlFormatter\n\n\nclass Pygments(Directive):\n \"\"\" Source code syntax hightlighting.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = {\n 'anchorlinenos': directives.flag,\n 'classprefix': directives.unchanged,\n 'hl_lines': directives.unchanged,\n 'lineanchors': directives.unchanged,\n 'linenos': directives.unchanged,\n 'linenospecial': directives.nonnegative_int,\n 'linenostart': directives.nonnegative_int,\n 'linenostep': directives.nonnegative_int,\n 'lineseparator': directives.unchanged,\n 'linespans': directives.unchanged,\n 'nobackground': directives.flag,\n 'nowrap': directives.flag,\n 'tagsfile': directives.unchanged,\n 'tagurlformat': directives.unchanged,\n }\n has_content = True\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n # no lexer found - use the text one instead of an exception\n lexer = TextLexer()\n\n if 'linenos' in self.options and self.options['linenos'] not in ('table', 'inline'):\n if self.options['linenos'] == 'none':\n self.options.pop('linenos')\n else:\n self.options['linenos'] = 'table'\n\n for flag in ('nowrap', 'nobackground', 'anchorlinenos'):\n if flag in self.options:\n self.options[flag] = True\n\n # noclasses should already default to False, but just in case...\n formatter = HtmlFormatter(noclasses=False, **self.options)\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\n\ndef register():\n directives.register_directive('code-block', Pygments)\n directives.register_directive('sourcecode', Pygments)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def update_album(user, imgur_client, reddit_client):
return
<|reserved_special_token_0|>
def is_gif(url):
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def respond_to_comment(comment, album_user, album_url, num_images, num_gifs):
body = (
'Here is an album of all unique image/gif posts made by [{user}]({album_url}). ({num_images} images)'
.format(user=album_user.name, album_url=album_url, num_images=
num_images, num_gifs=num_gifs))
comment.reply(body)
return
def create_album(user, imgur_client, reddit_client):
album = imgur_client.create_album({'title': user.name, 'privacy': 'hidden'}
)
urls = []
images = []
for submission in reddit_client.redditor(user.name).submissions.top('all'):
if not submission.is_self and submission.url not in urls:
urls.append(submission.url)
try:
image = imgur_client.upload_from_url(submission.url, config
=None, anon=False)
images.append(image['id'])
sleep(8)
            except Exception:
                pass  # skip URLs Imgur cannot ingest
if len(images) > 0:
imgur_client.album_add_images(album['id'], images)
return album['id']
def update_album(user, imgur_client, reddit_client):
return
def is_image(url):
return True
def is_gif(url):
return True
def run_bot():
reddit = praw.Reddit(client_id=config.CLIENT_ID_REDDIT, client_secret=
config.SECRET_CODE_REDDIT, user_agent=config.USER_AGENT_REDDIT,
username=config.USERNAME_REDDIT, password=config.PASSWORD_REDDIT)
client = ImgurClient(client_id=config.CLIENT_ID_IMGUR, client_secret=
config.SECRET_CODE_IMGUR, access_token=config.ACCESS_TOKEN_IMGUR,
refresh_token=config.REFRESH_TOKEN_IMGUR)
login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
print('Bot Initiation Successful')
print('Logged in at: {time}'.format(time=login_time))
print('Logged into Reddit as: {user}'.format(user=reddit.user.me().name))
print('Logged into Imgur as: {imgur_user}'.format(imgur_user=''))
print('{api_calls} Imgur API calls remaining for the day.'.format(
api_calls=client.credits['ClientRemaining']))
print('----------')
default_url = 'https://imgur.com/'
command_call = '!compile-album'
subreddit = reddit.subreddit('all')
for comment in subreddit.stream.comments():
if command_call in comment.body and comment.created_utc > login_time:
parent_id = comment.parent_id
if parent_id[0:3] == 't1_':
parent_comment = reddit.comment(id=parent_id[3:])
album_id = create_album(parent_comment.author, client, reddit)
album = client.get_album(album_id)
respond_to_comment(comment, parent_comment.author, album.
link, album.images_count, 0)
elif parent_id[0:3] == 't3_':
parent_submission = reddit.submission(id=parent_id[3:])
album_id = create_album(parent_submission.author, client,
reddit)
album = client.get_album(album_id)
respond_to_comment(comment, parent_submission.author, album
.link, album.images_count, 0)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def respond_to_comment(comment, album_user, album_url, num_images, num_gifs):
body = (
'Here is an album of all unique image/gif posts made by [{user}]({album_url}). ({num_images} images)'
.format(user=album_user.name, album_url=album_url, num_images=
num_images, num_gifs=num_gifs))
comment.reply(body)
return
def create_album(user, imgur_client, reddit_client):
album = imgur_client.create_album({'title': user.name, 'privacy': 'hidden'}
)
urls = []
images = []
for submission in reddit_client.redditor(user.name).submissions.top('all'):
if not submission.is_self and submission.url not in urls:
urls.append(submission.url)
try:
image = imgur_client.upload_from_url(submission.url, config
=None, anon=False)
images.append(image['id'])
sleep(8)
            except Exception:
                pass  # skip URLs Imgur cannot ingest
if len(images) > 0:
imgur_client.album_add_images(album['id'], images)
return album['id']
def update_album(user, imgur_client, reddit_client):
return
def is_image(url):
return True
def is_gif(url):
return True
def run_bot():
reddit = praw.Reddit(client_id=config.CLIENT_ID_REDDIT, client_secret=
config.SECRET_CODE_REDDIT, user_agent=config.USER_AGENT_REDDIT,
username=config.USERNAME_REDDIT, password=config.PASSWORD_REDDIT)
client = ImgurClient(client_id=config.CLIENT_ID_IMGUR, client_secret=
config.SECRET_CODE_IMGUR, access_token=config.ACCESS_TOKEN_IMGUR,
refresh_token=config.REFRESH_TOKEN_IMGUR)
login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
print('Bot Initiation Successful')
print('Logged in at: {time}'.format(time=login_time))
print('Logged into Reddit as: {user}'.format(user=reddit.user.me().name))
print('Logged into Imgur as: {imgur_user}'.format(imgur_user=''))
print('{api_calls} Imgur API calls remaining for the day.'.format(
api_calls=client.credits['ClientRemaining']))
print('----------')
default_url = 'https://imgur.com/'
command_call = '!compile-album'
subreddit = reddit.subreddit('all')
for comment in subreddit.stream.comments():
if command_call in comment.body and comment.created_utc > login_time:
parent_id = comment.parent_id
if parent_id[0:3] == 't1_':
parent_comment = reddit.comment(id=parent_id[3:])
album_id = create_album(parent_comment.author, client, reddit)
album = client.get_album(album_id)
respond_to_comment(comment, parent_comment.author, album.
link, album.images_count, 0)
elif parent_id[0:3] == 't3_':
parent_submission = reddit.submission(id=parent_id[3:])
album_id = create_album(parent_submission.author, client,
reddit)
album = client.get_album(album_id)
respond_to_comment(comment, parent_submission.author, album
.link, album.images_count, 0)
run_bot()
<|reserved_special_token_1|>
import praw
import config
from imgurpython import ImgurClient
import datetime
from time import sleep
def respond_to_comment(comment, album_user, album_url, num_images, num_gifs):
body = (
'Here is an album of all unique image/gif posts made by [{user}]({album_url}). ({num_images} images)'
.format(user=album_user.name, album_url=album_url, num_images=
num_images, num_gifs=num_gifs))
comment.reply(body)
return
def create_album(user, imgur_client, reddit_client):
album = imgur_client.create_album({'title': user.name, 'privacy': 'hidden'}
)
urls = []
images = []
for submission in reddit_client.redditor(user.name).submissions.top('all'):
if not submission.is_self and submission.url not in urls:
urls.append(submission.url)
try:
image = imgur_client.upload_from_url(submission.url, config
=None, anon=False)
images.append(image['id'])
sleep(8)
            except Exception:
                pass  # skip URLs Imgur cannot ingest
if len(images) > 0:
imgur_client.album_add_images(album['id'], images)
return album['id']
def update_album(user, imgur_client, reddit_client):
return
def is_image(url):
return True
def is_gif(url):
return True
def run_bot():
reddit = praw.Reddit(client_id=config.CLIENT_ID_REDDIT, client_secret=
config.SECRET_CODE_REDDIT, user_agent=config.USER_AGENT_REDDIT,
username=config.USERNAME_REDDIT, password=config.PASSWORD_REDDIT)
client = ImgurClient(client_id=config.CLIENT_ID_IMGUR, client_secret=
config.SECRET_CODE_IMGUR, access_token=config.ACCESS_TOKEN_IMGUR,
refresh_token=config.REFRESH_TOKEN_IMGUR)
login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
print('Bot Initiation Successful')
print('Logged in at: {time}'.format(time=login_time))
print('Logged into Reddit as: {user}'.format(user=reddit.user.me().name))
print('Logged into Imgur as: {imgur_user}'.format(imgur_user=''))
print('{api_calls} Imgur API calls remaining for the day.'.format(
api_calls=client.credits['ClientRemaining']))
print('----------')
default_url = 'https://imgur.com/'
command_call = '!compile-album'
subreddit = reddit.subreddit('all')
for comment in subreddit.stream.comments():
if command_call in comment.body and comment.created_utc > login_time:
parent_id = comment.parent_id
if parent_id[0:3] == 't1_':
parent_comment = reddit.comment(id=parent_id[3:])
album_id = create_album(parent_comment.author, client, reddit)
album = client.get_album(album_id)
respond_to_comment(comment, parent_comment.author, album.
link, album.images_count, 0)
elif parent_id[0:3] == 't3_':
parent_submission = reddit.submission(id=parent_id[3:])
album_id = create_album(parent_submission.author, client,
reddit)
album = client.get_album(album_id)
respond_to_comment(comment, parent_submission.author, album
.link, album.images_count, 0)
run_bot()
<|reserved_special_token_1|>
import praw
import config
from imgurpython import ImgurClient
import datetime
from time import sleep
def respond_to_comment(comment, album_user, album_url, num_images, num_gifs):
body = "Here is an album of all unique image/gif posts made by " \
"[{user}]({album_url}). ({num_images} images" \
")".format(user=album_user.name, album_url=album_url, num_images=num_images, num_gifs=num_gifs)
comment.reply(body)
return
def create_album(user, imgur_client, reddit_client):
album = imgur_client.create_album({"title": user.name, "privacy": "hidden"})
urls = []
images = []
for submission in reddit_client.redditor(user.name).submissions.top("all"):
if not submission.is_self and submission.url not in urls:
urls.append(submission.url)
try:
image = imgur_client.upload_from_url(submission.url, config=None, anon=False)
images.append(image["id"])
# Sleep command to avoid exceeding rate limit
# 86400 seconds per day / 12500 requests per day = 1 request every 6.9 seconds
sleep(8)
            except Exception:
                pass  # skip URLs Imgur cannot ingest
if len(images) > 0:
imgur_client.album_add_images(album["id"], images)
return album["id"]
def update_album(user, imgur_client, reddit_client):
return
def is_image(url):
return True
def is_gif(url):
return True
def run_bot():
reddit = praw.Reddit(
client_id=config.CLIENT_ID_REDDIT,
client_secret=config.SECRET_CODE_REDDIT,
user_agent=config.USER_AGENT_REDDIT,
username=config.USERNAME_REDDIT,
password=config.PASSWORD_REDDIT
)
client=ImgurClient(
client_id=config.CLIENT_ID_IMGUR,
client_secret=config.SECRET_CODE_IMGUR,
access_token=config.ACCESS_TOKEN_IMGUR,
refresh_token=config.REFRESH_TOKEN_IMGUR
)
login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
print('Bot Initiation Successful')
print("Logged in at: {time}".format(time = login_time))
print("Logged into Reddit as: {user}".format(user=reddit.user.me().name))
print("Logged into Imgur as: {imgur_user}".format(imgur_user=""))
print("{api_calls} Imgur API calls remaining for the day.".format(api_calls=client.credits["ClientRemaining"]))
print("----------")
default_url = "https://imgur.com/"
command_call = '!compile-album'
subreddit = reddit.subreddit("all")
for comment in subreddit.stream.comments():
if command_call in comment.body and comment.created_utc > login_time:
parent_id = comment.parent_id
if parent_id[0:3] == "t1_":
parent_comment = reddit.comment(id=parent_id[3:])
album_id = create_album(parent_comment.author, client, reddit)
album = client.get_album(album_id)
respond_to_comment(comment, parent_comment.author, album.link, album.images_count, 0)
elif parent_id[0:3] == "t3_":
parent_submission = reddit.submission(id=parent_id[3:])
album_id = create_album(parent_submission.author, client, reddit)
album = client.get_album(album_id)
respond_to_comment(comment, parent_submission.author, album.link, album.images_count, 0)
run_bot()
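
# Sketch of the config module this script assumes (names taken from the
# attribute accesses above; the values are placeholders):
#
#   # config.py
#   CLIENT_ID_REDDIT = "..."
#   SECRET_CODE_REDDIT = "..."
#   USER_AGENT_REDDIT = "album-compiler-bot/0.1"
#   USERNAME_REDDIT = "..."
#   PASSWORD_REDDIT = "..."
#   CLIENT_ID_IMGUR = "..."
#   SECRET_CODE_IMGUR = "..."
#   ACCESS_TOKEN_IMGUR = "..."
#   REFRESH_TOKEN_IMGUR = "..."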
|
flexible
|
{
"blob_id": "ca009022832963934230e356f9ea9eaedac7378b",
"index": 1745,
"step-1": "<mask token>\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\n<mask token>\n\n\ndef is_gif(url):\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef respond_to_comment(comment, album_user, album_url, num_images, num_gifs):\n body = (\n 'Here is an album of all unique image/gif posts made by [{user}]({album_url}). ({num_images} images)'\n .format(user=album_user.name, album_url=album_url, num_images=\n num_images, num_gifs=num_gifs))\n comment.reply(body)\n return\n\n\ndef create_album(user, imgur_client, reddit_client):\n album = imgur_client.create_album({'title': user.name, 'privacy': 'hidden'}\n )\n urls = []\n images = []\n for submission in reddit_client.redditor(user.name).submissions.top('all'):\n if not submission.is_self and submission.url not in urls:\n urls.append(submission.url)\n try:\n image = imgur_client.upload_from_url(submission.url, config\n =None, anon=False)\n images.append(image['id'])\n sleep(8)\n except:\n None\n if len(images) > 0:\n imgur_client.album_add_images(album['id'], images)\n return album['id']\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\ndef is_image(url):\n return True\n\n\ndef is_gif(url):\n return True\n\n\ndef run_bot():\n reddit = praw.Reddit(client_id=config.CLIENT_ID_REDDIT, client_secret=\n config.SECRET_CODE_REDDIT, user_agent=config.USER_AGENT_REDDIT,\n username=config.USERNAME_REDDIT, password=config.PASSWORD_REDDIT)\n client = ImgurClient(client_id=config.CLIENT_ID_IMGUR, client_secret=\n config.SECRET_CODE_IMGUR, access_token=config.ACCESS_TOKEN_IMGUR,\n refresh_token=config.REFRESH_TOKEN_IMGUR)\n login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()\n print('Bot Initiation Successful')\n print('Logged in at: {time}'.format(time=login_time))\n print('Logged into Reddit as: {user}'.format(user=reddit.user.me().name))\n print('Logged into Imgur as: {imgur_user}'.format(imgur_user=''))\n print('{api_calls} Imgur API calls remaining for the day.'.format(\n api_calls=client.credits['ClientRemaining']))\n print('----------')\n default_url = 'https://imgur.com/'\n command_call = '!compile-album'\n subreddit = reddit.subreddit('all')\n for comment in subreddit.stream.comments():\n if command_call in comment.body and comment.created_utc > login_time:\n parent_id = comment.parent_id\n if parent_id[0:3] == 't1_':\n parent_comment = reddit.comment(id=parent_id[3:])\n album_id = create_album(parent_comment.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_comment.author, album.\n link, album.images_count, 0)\n elif parent_id[0:3] == 't3_':\n parent_submission = reddit.submission(id=parent_id[3:])\n album_id = create_album(parent_submission.author, client,\n reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_submission.author, album\n .link, album.images_count, 0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef respond_to_comment(comment, album_user, album_url, num_images, num_gifs):\n body = (\n 'Here is an album of all unique image/gif posts made by [{user}]({album_url}). ({num_images} images)'\n .format(user=album_user.name, album_url=album_url, num_images=\n num_images, num_gifs=num_gifs))\n comment.reply(body)\n return\n\n\ndef create_album(user, imgur_client, reddit_client):\n album = imgur_client.create_album({'title': user.name, 'privacy': 'hidden'}\n )\n urls = []\n images = []\n for submission in reddit_client.redditor(user.name).submissions.top('all'):\n if not submission.is_self and submission.url not in urls:\n urls.append(submission.url)\n try:\n image = imgur_client.upload_from_url(submission.url, config\n =None, anon=False)\n images.append(image['id'])\n sleep(8)\n except:\n None\n if len(images) > 0:\n imgur_client.album_add_images(album['id'], images)\n return album['id']\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\ndef is_image(url):\n return True\n\n\ndef is_gif(url):\n return True\n\n\ndef run_bot():\n reddit = praw.Reddit(client_id=config.CLIENT_ID_REDDIT, client_secret=\n config.SECRET_CODE_REDDIT, user_agent=config.USER_AGENT_REDDIT,\n username=config.USERNAME_REDDIT, password=config.PASSWORD_REDDIT)\n client = ImgurClient(client_id=config.CLIENT_ID_IMGUR, client_secret=\n config.SECRET_CODE_IMGUR, access_token=config.ACCESS_TOKEN_IMGUR,\n refresh_token=config.REFRESH_TOKEN_IMGUR)\n login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()\n print('Bot Initiation Successful')\n print('Logged in at: {time}'.format(time=login_time))\n print('Logged into Reddit as: {user}'.format(user=reddit.user.me().name))\n print('Logged into Imgur as: {imgur_user}'.format(imgur_user=''))\n print('{api_calls} Imgur API calls remaining for the day.'.format(\n api_calls=client.credits['ClientRemaining']))\n print('----------')\n default_url = 'https://imgur.com/'\n command_call = '!compile-album'\n subreddit = reddit.subreddit('all')\n for comment in subreddit.stream.comments():\n if command_call in comment.body and comment.created_utc > login_time:\n parent_id = comment.parent_id\n if parent_id[0:3] == 't1_':\n parent_comment = reddit.comment(id=parent_id[3:])\n album_id = create_album(parent_comment.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_comment.author, album.\n link, album.images_count, 0)\n elif parent_id[0:3] == 't3_':\n parent_submission = reddit.submission(id=parent_id[3:])\n album_id = create_album(parent_submission.author, client,\n reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_submission.author, album\n .link, album.images_count, 0)\n\n\nrun_bot()\n",
"step-4": "import praw\nimport config\nfrom imgurpython import ImgurClient\nimport datetime\nfrom time import sleep\n\n\ndef respond_to_comment(comment, album_user, album_url, num_images, num_gifs):\n body = (\n 'Here is an album of all unique image/gif posts made by [{user}]({album_url}). ({num_images} images)'\n .format(user=album_user.name, album_url=album_url, num_images=\n num_images, num_gifs=num_gifs))\n comment.reply(body)\n return\n\n\ndef create_album(user, imgur_client, reddit_client):\n album = imgur_client.create_album({'title': user.name, 'privacy': 'hidden'}\n )\n urls = []\n images = []\n for submission in reddit_client.redditor(user.name).submissions.top('all'):\n if not submission.is_self and submission.url not in urls:\n urls.append(submission.url)\n try:\n image = imgur_client.upload_from_url(submission.url, config\n =None, anon=False)\n images.append(image['id'])\n sleep(8)\n except:\n None\n if len(images) > 0:\n imgur_client.album_add_images(album['id'], images)\n return album['id']\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\ndef is_image(url):\n return True\n\n\ndef is_gif(url):\n return True\n\n\ndef run_bot():\n reddit = praw.Reddit(client_id=config.CLIENT_ID_REDDIT, client_secret=\n config.SECRET_CODE_REDDIT, user_agent=config.USER_AGENT_REDDIT,\n username=config.USERNAME_REDDIT, password=config.PASSWORD_REDDIT)\n client = ImgurClient(client_id=config.CLIENT_ID_IMGUR, client_secret=\n config.SECRET_CODE_IMGUR, access_token=config.ACCESS_TOKEN_IMGUR,\n refresh_token=config.REFRESH_TOKEN_IMGUR)\n login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()\n print('Bot Initiation Successful')\n print('Logged in at: {time}'.format(time=login_time))\n print('Logged into Reddit as: {user}'.format(user=reddit.user.me().name))\n print('Logged into Imgur as: {imgur_user}'.format(imgur_user=''))\n print('{api_calls} Imgur API calls remaining for the day.'.format(\n api_calls=client.credits['ClientRemaining']))\n print('----------')\n default_url = 'https://imgur.com/'\n command_call = '!compile-album'\n subreddit = reddit.subreddit('all')\n for comment in subreddit.stream.comments():\n if command_call in comment.body and comment.created_utc > login_time:\n parent_id = comment.parent_id\n if parent_id[0:3] == 't1_':\n parent_comment = reddit.comment(id=parent_id[3:])\n album_id = create_album(parent_comment.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_comment.author, album.\n link, album.images_count, 0)\n elif parent_id[0:3] == 't3_':\n parent_submission = reddit.submission(id=parent_id[3:])\n album_id = create_album(parent_submission.author, client,\n reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_submission.author, album\n .link, album.images_count, 0)\n\n\nrun_bot()\n",
"step-5": "import praw\nimport config\nfrom imgurpython import ImgurClient\nimport datetime\nfrom time import sleep\n\n\ndef respond_to_comment(comment, album_user, album_url, num_images, num_gifs):\n body = \"Here is an album of all unique image/gif posts made by \" \\\n \"[{user}]({album_url}). ({num_images} images\" \\\n \")\".format(user=album_user.name, album_url=album_url, num_images=num_images, num_gifs=num_gifs)\n comment.reply(body)\n return\n\n\ndef create_album(user, imgur_client, reddit_client):\n album = imgur_client.create_album({\"title\": user.name, \"privacy\": \"hidden\"})\n urls = []\n images = []\n for submission in reddit_client.redditor(user.name).submissions.top(\"all\"):\n if not submission.is_self and submission.url not in urls:\n urls.append(submission.url)\n try:\n image = imgur_client.upload_from_url(submission.url, config=None, anon=False)\n images.append(image[\"id\"])\n # Sleep command to avoid exceeding rate limit\n # 86400 seconds per day / 12500 requests per day = 1 request every 6.9 seconds\n sleep(8)\n except:\n None\n if len(images) > 0:\n imgur_client.album_add_images(album[\"id\"], images)\n return album[\"id\"]\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\ndef is_image(url):\n return True\n\n\ndef is_gif(url):\n return True\n\n\ndef run_bot():\n reddit = praw.Reddit(\n client_id=config.CLIENT_ID_REDDIT,\n client_secret=config.SECRET_CODE_REDDIT,\n user_agent=config.USER_AGENT_REDDIT,\n username=config.USERNAME_REDDIT,\n password=config.PASSWORD_REDDIT\n )\n\n client=ImgurClient(\n client_id=config.CLIENT_ID_IMGUR,\n client_secret=config.SECRET_CODE_IMGUR,\n access_token=config.ACCESS_TOKEN_IMGUR,\n refresh_token=config.REFRESH_TOKEN_IMGUR\n )\n login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()\n print('Bot Initiation Successful')\n print(\"Logged in at: {time}\".format(time = login_time))\n print(\"Logged into Reddit as: {user}\".format(user=reddit.user.me().name))\n print(\"Logged into Imgur as: {imgur_user}\".format(imgur_user=\"\"))\n print(\"{api_calls} Imgur API calls remaining for the day.\".format(api_calls=client.credits[\"ClientRemaining\"]))\n print(\"----------\")\n default_url = \"https://imgur.com/\"\n command_call = '!compile-album'\n subreddit = reddit.subreddit(\"all\")\n for comment in subreddit.stream.comments():\n if command_call in comment.body and comment.created_utc > login_time:\n parent_id = comment.parent_id\n if parent_id[0:3] == \"t1_\":\n parent_comment = reddit.comment(id=parent_id[3:])\n album_id = create_album(parent_comment.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_comment.author, album.link, album.images_count, 0)\n elif parent_id[0:3] == \"t3_\":\n parent_submission = reddit.submission(id=parent_id[3:])\n album_id = create_album(parent_submission.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_submission.author, album.link, album.images_count, 0)\n\n\nrun_bot()",
"step-ids": [
2,
6,
7,
8,
9
]
}
|
[
2,
6,
7,
8,
9
] |
from redis3barScore import StudyThreeBarsScore
from redisUtil import RedisTimeFrame
def test_score1() -> None:
package = {'close': 13.92,
'high': 14.57,
'low': 12.45,
'open': 13.4584,
'symbol': 'FANG',
'timestamp': 1627493640000000000,
'trade_count': 602,
'volume': 213907,
'vwap': 8.510506}
app = StudyThreeBarsScore()
newPrice = 13.70
realtime = []
symbol = "FANG"
stack = {'symbol': symbol, 'value': {
'firstPrice': 13.50,
'secondPrice': 14.00,
'thirdPrice': 13.00,
'timeframe': RedisTimeFrame.MIN2
}}
score1 = app.ThreeBarPlay(13.60, [], stack)
assert score1 == 4
score2 = app.ThreeBarPlay(13.40, [], stack)
assert score2 == 2
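
# Run under pytest (module filename hypothetical):
#   pytest test_redis3barScore.py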
|
normal
|
{
"blob_id": "ec64ddd01034debadb6674e71125f673f5de8367",
"index": 567,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_score1() ->None:\n package = {'close': 13.92, 'high': 14.57, 'low': 12.45, 'open': 13.4584,\n 'symbol': 'FANG', 'timestamp': 1627493640000000000, 'trade_count': \n 602, 'volume': 213907, 'vwap': 8.510506}\n app = StudyThreeBarsScore()\n newPrice = 13.7\n realtime = []\n symbol = 'FANG'\n stack = {'symbol': symbol, 'value': {'firstPrice': 13.5, 'secondPrice':\n 14.0, 'thirdPrice': 13.0, 'timeframe': RedisTimeFrame.MIN2}}\n score1 = app.ThreeBarPlay(13.6, [], stack)\n assert score1 == 4\n score2 = app.ThreeBarPlay(13.4, [], stack)\n assert score2 == 2\n",
"step-3": "from redis3barScore import StudyThreeBarsScore\nfrom redisUtil import RedisTimeFrame\n\n\ndef test_score1() ->None:\n package = {'close': 13.92, 'high': 14.57, 'low': 12.45, 'open': 13.4584,\n 'symbol': 'FANG', 'timestamp': 1627493640000000000, 'trade_count': \n 602, 'volume': 213907, 'vwap': 8.510506}\n app = StudyThreeBarsScore()\n newPrice = 13.7\n realtime = []\n symbol = 'FANG'\n stack = {'symbol': symbol, 'value': {'firstPrice': 13.5, 'secondPrice':\n 14.0, 'thirdPrice': 13.0, 'timeframe': RedisTimeFrame.MIN2}}\n score1 = app.ThreeBarPlay(13.6, [], stack)\n assert score1 == 4\n score2 = app.ThreeBarPlay(13.4, [], stack)\n assert score2 == 2\n",
"step-4": "from redis3barScore import StudyThreeBarsScore\nfrom redisUtil import RedisTimeFrame\n\n\ndef test_score1() -> None:\n package = {'close': 13.92,\n 'high': 14.57,\n 'low': 12.45,\n 'open': 13.4584,\n 'symbol': 'FANG',\n 'timestamp': 1627493640000000000,\n 'trade_count': 602,\n 'volume': 213907,\n 'vwap': 8.510506}\n app = StudyThreeBarsScore()\n newPrice = 13.70\n realtime = []\n symbol = \"FANG\"\n stack = {'symbol': symbol, 'value': {\n 'firstPrice': 13.50,\n 'secondPrice': 14.00,\n 'thirdPrice': 13.00,\n 'timeframe': RedisTimeFrame.MIN2\n }}\n score1 = app.ThreeBarPlay(13.60, [], stack)\n assert score1 == 4\n\n score2 = app.ThreeBarPlay(13.40, [], stack)\n assert score2 == 2\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from functiona import *
total = totalMarks(85, 67, 56, 45, 78)
avg = average(total)
grade = findGrade(avg)
print(grade)
print(total)
print(avg)
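
# The functiona module is not shown; a minimal sketch consistent with the
# calls above (signatures and grade bands are assumptions):
#
#   def totalMarks(*marks):
#       return sum(marks)
#
#   def average(total, count=5):
#       return total / count
#
#   def findGrade(avg):
#       return 'A' if avg >= 75 else 'B' if avg >= 60 else 'C'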
|
normal
|
{
"blob_id": "05f77472625e902b66c4a97a4c640835826bd494",
"index": 3635,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(grade)\nprint(total)\nprint(avg)\n",
"step-3": "<mask token>\ntotal = totalMarks(85, 67, 56, 45, 78)\navg = average(total)\ngrade = findGrade(avg)\nprint(grade)\nprint(total)\nprint(avg)\n",
"step-4": "from functiona import *\ntotal = totalMarks(85, 67, 56, 45, 78)\navg = average(total)\ngrade = findGrade(avg)\nprint(grade)\nprint(total)\nprint(avg)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
import sys
import os
import sqlite3
from matplotlib import pyplot as plt
import numpy as np
def main():
if len(sys.argv) < 2:
        print('usage: {} sqlite_file ...'.format(sys.argv[0]))
sys.exit()
db_filenames = sys.argv[1:]
num_of_dbs = len(db_filenames)
conn = sqlite3.connect(":memory:")
c = conn.cursor()
for i in range(num_of_dbs):
sql = "ATTACH DATABASE '{}' as db{}".format(db_filenames[i], i)
c.execute(sql)
sql = 'SELECT text'
for i in range(num_of_dbs):
sql += ', SUM(db{}) as db{}'.format(i, i)
sql += ' FROM (\n'
for i in range(num_of_dbs):
if i > 0:
sql += ' UNION\n'
sql += ' SELECT text'
for j in range(num_of_dbs):
if i == j:
sql += ', SUM(end - start)'
else:
sql += ', 0'
sql += ' as db{}'.format(j)
sql += ' FROM db{}.NVTX_EVENTS WHERE text IS NOT NULL GROUP BY text\n'.format(i)
sql += ') GROUP BY text'
# print(sql)
labels = []
durations = []
i = 0
for j in range(num_of_dbs):
durations.append([])
for row in c.execute(sql):
#print(row)
labels.append(row[0])
lst = []
for j in range(num_of_dbs):
durations[j].append(row[1+j])
i += 1
conn.close()
x = np.arange(len(labels))
width = 1.5 / (num_of_dbs * len(labels))
fig, ax = plt.subplots()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{:.1f}'.format(height/1e9),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
for i in range(num_of_dbs):
autolabel(ax.bar(-(num_of_dbs*width)/2 + width/2 + x + width*i, durations[i], width * 0.95, label=os.path.splitext(db_filenames[i])[0]))
plt.xticks(x, labels, rotation=60, rotation_mode="anchor", horizontalalignment="right", verticalalignment="center")
ax.legend(bbox_to_anchor=(1.1, 1.05))
# plt.yticks([1e8, 1e8 * 5, 1e9, 1e9 * 5])
plt.yticks([])
plt.ylabel('Time(sec)')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,y1,y2*1.05))
plt.tight_layout()
plt.show()
# plt.savefig(os.path.splitext(db_filenames[0])[0] + ".svg")
if __name__ == "__main__":
main()
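
# Example invocation over two sqlite exports that contain an NVTX_EVENTS table
# (e.g. from Nsight Systems); the file names are hypothetical:
#   python nvtx_compare.py baseline.sqlite optimized.sqlite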
|
normal
|
{
"blob_id": "b24ce9ed2df11df4cbf47949915685c09ec7543a",
"index": 7070,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n if len(sys.argv) < 2:\n print('usage: sqlite_file ...')\n sys.exit()\n db_filenames = sys.argv[1:]\n num_of_dbs = len(db_filenames)\n conn = sqlite3.connect(':memory:')\n c = conn.cursor()\n for i in range(num_of_dbs):\n sql = \"ATTACH DATABASE '{}' as db{}\".format(db_filenames[i], i)\n c.execute(sql)\n sql = 'SELECT text'\n for i in range(num_of_dbs):\n sql += ', SUM(db{}) as db{}'.format(i, i)\n sql += ' FROM (\\n'\n for i in range(num_of_dbs):\n if i > 0:\n sql += ' UNION\\n'\n sql += ' SELECT text'\n for j in range(num_of_dbs):\n if i == j:\n sql += ', SUM(end - start)'\n else:\n sql += ', 0'\n sql += ' as db{}'.format(j)\n sql += (' FROM db{}.NVTX_EVENTS WHERE text IS NOT NULL GROUP BY text\\n'\n .format(i))\n sql += ') GROUP BY text'\n labels = []\n durations = []\n i = 0\n for j in range(num_of_dbs):\n durations.append([])\n for row in c.execute(sql):\n labels.append(row[0])\n lst = []\n for j in range(num_of_dbs):\n durations[j].append(row[1 + j])\n i += 1\n conn.close()\n x = np.arange(len(labels))\n width = 1.5 / (num_of_dbs * len(labels))\n fig, ax = plt.subplots()\n\n def autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{:.1f}'.format(height / 1000000000.0), xy=(rect.\n get_x() + rect.get_width() / 2, height), xytext=(0, 3),\n textcoords='offset points', ha='center', va='bottom')\n for i in range(num_of_dbs):\n autolabel(ax.bar(-(num_of_dbs * width) / 2 + width / 2 + x + width *\n i, durations[i], width * 0.95, label=os.path.splitext(\n db_filenames[i])[0]))\n plt.xticks(x, labels, rotation=60, rotation_mode='anchor',\n horizontalalignment='right', verticalalignment='center')\n ax.legend(bbox_to_anchor=(1.1, 1.05))\n plt.yticks([])\n plt.ylabel('Time(sec)')\n x1, x2, y1, y2 = plt.axis()\n plt.axis((x1, x2, y1, y2 * 1.05))\n plt.tight_layout()\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n if len(sys.argv) < 2:\n print('usage: sqlite_file ...')\n sys.exit()\n db_filenames = sys.argv[1:]\n num_of_dbs = len(db_filenames)\n conn = sqlite3.connect(':memory:')\n c = conn.cursor()\n for i in range(num_of_dbs):\n sql = \"ATTACH DATABASE '{}' as db{}\".format(db_filenames[i], i)\n c.execute(sql)\n sql = 'SELECT text'\n for i in range(num_of_dbs):\n sql += ', SUM(db{}) as db{}'.format(i, i)\n sql += ' FROM (\\n'\n for i in range(num_of_dbs):\n if i > 0:\n sql += ' UNION\\n'\n sql += ' SELECT text'\n for j in range(num_of_dbs):\n if i == j:\n sql += ', SUM(end - start)'\n else:\n sql += ', 0'\n sql += ' as db{}'.format(j)\n sql += (' FROM db{}.NVTX_EVENTS WHERE text IS NOT NULL GROUP BY text\\n'\n .format(i))\n sql += ') GROUP BY text'\n labels = []\n durations = []\n i = 0\n for j in range(num_of_dbs):\n durations.append([])\n for row in c.execute(sql):\n labels.append(row[0])\n lst = []\n for j in range(num_of_dbs):\n durations[j].append(row[1 + j])\n i += 1\n conn.close()\n x = np.arange(len(labels))\n width = 1.5 / (num_of_dbs * len(labels))\n fig, ax = plt.subplots()\n\n def autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{:.1f}'.format(height / 1000000000.0), xy=(rect.\n get_x() + rect.get_width() / 2, height), xytext=(0, 3),\n textcoords='offset points', ha='center', va='bottom')\n for i in range(num_of_dbs):\n autolabel(ax.bar(-(num_of_dbs * width) / 2 + width / 2 + x + width *\n i, durations[i], width * 0.95, label=os.path.splitext(\n db_filenames[i])[0]))\n plt.xticks(x, labels, rotation=60, rotation_mode='anchor',\n horizontalalignment='right', verticalalignment='center')\n ax.legend(bbox_to_anchor=(1.1, 1.05))\n plt.yticks([])\n plt.ylabel('Time(sec)')\n x1, x2, y1, y2 = plt.axis()\n plt.axis((x1, x2, y1, y2 * 1.05))\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nimport os\nimport sqlite3\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\ndef main():\n if len(sys.argv) < 2:\n print('usage: sqlite_file ...')\n sys.exit()\n db_filenames = sys.argv[1:]\n num_of_dbs = len(db_filenames)\n conn = sqlite3.connect(':memory:')\n c = conn.cursor()\n for i in range(num_of_dbs):\n sql = \"ATTACH DATABASE '{}' as db{}\".format(db_filenames[i], i)\n c.execute(sql)\n sql = 'SELECT text'\n for i in range(num_of_dbs):\n sql += ', SUM(db{}) as db{}'.format(i, i)\n sql += ' FROM (\\n'\n for i in range(num_of_dbs):\n if i > 0:\n sql += ' UNION\\n'\n sql += ' SELECT text'\n for j in range(num_of_dbs):\n if i == j:\n sql += ', SUM(end - start)'\n else:\n sql += ', 0'\n sql += ' as db{}'.format(j)\n sql += (' FROM db{}.NVTX_EVENTS WHERE text IS NOT NULL GROUP BY text\\n'\n .format(i))\n sql += ') GROUP BY text'\n labels = []\n durations = []\n i = 0\n for j in range(num_of_dbs):\n durations.append([])\n for row in c.execute(sql):\n labels.append(row[0])\n lst = []\n for j in range(num_of_dbs):\n durations[j].append(row[1 + j])\n i += 1\n conn.close()\n x = np.arange(len(labels))\n width = 1.5 / (num_of_dbs * len(labels))\n fig, ax = plt.subplots()\n\n def autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{:.1f}'.format(height / 1000000000.0), xy=(rect.\n get_x() + rect.get_width() / 2, height), xytext=(0, 3),\n textcoords='offset points', ha='center', va='bottom')\n for i in range(num_of_dbs):\n autolabel(ax.bar(-(num_of_dbs * width) / 2 + width / 2 + x + width *\n i, durations[i], width * 0.95, label=os.path.splitext(\n db_filenames[i])[0]))\n plt.xticks(x, labels, rotation=60, rotation_mode='anchor',\n horizontalalignment='right', verticalalignment='center')\n ax.legend(bbox_to_anchor=(1.1, 1.05))\n plt.yticks([])\n plt.ylabel('Time(sec)')\n x1, x2, y1, y2 = plt.axis()\n plt.axis((x1, x2, y1, y2 * 1.05))\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python\n\nimport sys\nimport os\nimport sqlite3\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\ndef main():\n if len(sys.argv) < 2:\n print('usage: sqlite_file ...')\n sys.exit()\n db_filenames = sys.argv[1:]\n num_of_dbs = len(db_filenames)\n conn = sqlite3.connect(\":memory:\")\n c = conn.cursor()\n\n for i in range(num_of_dbs):\n sql = \"ATTACH DATABASE '{}' as db{}\".format(db_filenames[i], i)\n c.execute(sql)\n\n sql = 'SELECT text'\n for i in range(num_of_dbs):\n sql += ', SUM(db{}) as db{}'.format(i, i)\n sql += ' FROM (\\n'\n for i in range(num_of_dbs):\n if i > 0:\n sql += ' UNION\\n'\n sql += ' SELECT text'\n for j in range(num_of_dbs):\n if i == j:\n sql += ', SUM(end - start)'\n else:\n sql += ', 0'\n sql += ' as db{}'.format(j)\n sql += ' FROM db{}.NVTX_EVENTS WHERE text IS NOT NULL GROUP BY text\\n'.format(i)\n sql += ') GROUP BY text'\n # print(sql)\n\n labels = []\n durations = []\n i = 0\n for j in range(num_of_dbs):\n durations.append([])\n for row in c.execute(sql):\n #print(row)\n labels.append(row[0])\n lst = []\n for j in range(num_of_dbs):\n durations[j].append(row[1+j])\n i += 1\n conn.close()\n x = np.arange(len(labels))\n width = 1.5 / (num_of_dbs * len(labels))\n fig, ax = plt.subplots()\n\n def autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{:.1f}'.format(height/1e9),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\n\n for i in range(num_of_dbs):\n autolabel(ax.bar(-(num_of_dbs*width)/2 + width/2 + x + width*i, durations[i], width * 0.95, label=os.path.splitext(db_filenames[i])[0]))\n plt.xticks(x, labels, rotation=60, rotation_mode=\"anchor\", horizontalalignment=\"right\", verticalalignment=\"center\")\n ax.legend(bbox_to_anchor=(1.1, 1.05))\n # plt.yticks([1e8, 1e8 * 5, 1e9, 1e9 * 5])\n plt.yticks([])\n plt.ylabel('Time(sec)')\n\n x1,x2,y1,y2 = plt.axis()\n plt.axis((x1,x2,y1,y2*1.05))\n\n plt.tight_layout()\n plt.show()\n # plt.savefig(os.path.splitext(db_filenames[0])[0] + \".svg\")\n\nif __name__ == \"__main__\":\n main()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import numpy as np
df = pd.DataFrame([['Hospital1', '2019-10-01'], ['Hospital2', '2019-10-01'],
['Hospital3', '2019-10-01'], ['Hospital1', '2019-10-01'], ['Hospital2',
'2019-10-02'], ['Hospital3', '2019-10-02'], ['Hospital2', '2019-10-03'],
['Hospital2', '2019-10-04'], ['Hospital3', '2019-10-04'], ['Hospital3',
'2019-10-05'], ['Hospital1', '2019-10-06'], ['Hospital1', '2019-10-07'],
['Hospital1', '2019-10-08']], columns=['Hospital_Name', 'Date'])
df2 = pd.DataFrame([['Hospital1', 12, 15, 16, 12], ['Hospital2', 10, 17, 14,
12], ['Hospital2', 15, 20, 12, 12]], columns=['Hospital_Name',
'2019-10-01', '2019-10-02', '2019-10-03', '2019-10-04'])
print(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)
)
print(df2.sum())
|
normal
|
{
"blob_id": "8d8f1f0dbb76b5c536bd1a2142bb61c51dd75075",
"index": 9573,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)\n )\nprint(df2.sum())\n",
"step-3": "<mask token>\ndf = pd.DataFrame([['Hospital1', '2019-10-01'], ['Hospital2', '2019-10-01'],\n ['Hospital3', '2019-10-01'], ['Hospital1', '2019-10-01'], ['Hospital2',\n '2019-10-02'], ['Hospital3', '2019-10-02'], ['Hospital2', '2019-10-03'],\n ['Hospital2', '2019-10-04'], ['Hospital3', '2019-10-04'], ['Hospital3',\n '2019-10-05'], ['Hospital1', '2019-10-06'], ['Hospital1', '2019-10-07'],\n ['Hospital1', '2019-10-08']], columns=['Hospital_Name', 'Date'])\ndf2 = pd.DataFrame([['Hospital1', 12, 15, 16, 12], ['Hospital2', 10, 17, 14,\n 12], ['Hospital2', 15, 20, 12, 12]], columns=['Hospital_Name',\n '2019-10-01', '2019-10-02', '2019-10-03', '2019-10-04'])\nprint(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)\n )\nprint(df2.sum())\n",
"step-4": "import pandas as pd\nimport numpy as np\ndf = pd.DataFrame([['Hospital1', '2019-10-01'], ['Hospital2', '2019-10-01'],\n ['Hospital3', '2019-10-01'], ['Hospital1', '2019-10-01'], ['Hospital2',\n '2019-10-02'], ['Hospital3', '2019-10-02'], ['Hospital2', '2019-10-03'],\n ['Hospital2', '2019-10-04'], ['Hospital3', '2019-10-04'], ['Hospital3',\n '2019-10-05'], ['Hospital1', '2019-10-06'], ['Hospital1', '2019-10-07'],\n ['Hospital1', '2019-10-08']], columns=['Hospital_Name', 'Date'])\ndf2 = pd.DataFrame([['Hospital1', 12, 15, 16, 12], ['Hospital2', 10, 17, 14,\n 12], ['Hospital2', 15, 20, 12, 12]], columns=['Hospital_Name',\n '2019-10-01', '2019-10-02', '2019-10-03', '2019-10-04'])\nprint(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)\n )\nprint(df2.sum())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
CARD_SIZE = (70, 90)
SPACING = 3
|
normal
|
{
"blob_id": "b8ebbef7403a71d6165a5462bc08e2634b4cebc5",
"index": 4287,
"step-1": "<mask token>\n",
"step-2": "CARD_SIZE = 70, 90\nSPACING = 3\n",
"step-3": "CARD_SIZE = (70, 90)\nSPACING = 3",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class API:
def __init__(self, base_url, version=1):
self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)
self.PROFILE = self.BASE + '/player'
self.CLUB = self.BASE + '/club'
self.LEADERBOARD = self.BASE + '/leaderboards'
self.EVENTS = self.BASE + '/events'
self.MISC = self.BASE + '/misc'
self.BATTLELOG = self.PROFILE + '/battlelog'
self.CLUB_SEARCH = self.CLUB + '/search'
self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'
path = os.path.join(os.path.dirname(__file__), os.path.pardir)
with open(os.path.join(path, '__init__.py')) as f:
self.VERSION = re.search('^__version__ = [\\\'"]([^\\\'"]*)[\\\'"]'
, f.read(), re.MULTILINE).group(1)
try:
data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())
except (TypeError, urllib.error.HTTPError, urllib.error.URLError):
self.BRAWLERS = {}
else:
if data:
self.BRAWLERS = {b['tID'].lower(): (str(b['scId'])[:2] +
'0' + str(b['scId'])[2:]) for b in data['characters'] if
b['tID']}
else:
self.BRAWLERS = {}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class API:
def __init__(self, base_url, version=1):
self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)
self.PROFILE = self.BASE + '/player'
self.CLUB = self.BASE + '/club'
self.LEADERBOARD = self.BASE + '/leaderboards'
self.EVENTS = self.BASE + '/events'
self.MISC = self.BASE + '/misc'
self.BATTLELOG = self.PROFILE + '/battlelog'
self.CLUB_SEARCH = self.CLUB + '/search'
self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'
path = os.path.join(os.path.dirname(__file__), os.path.pardir)
with open(os.path.join(path, '__init__.py')) as f:
self.VERSION = re.search('^__version__ = [\\\'"]([^\\\'"]*)[\\\'"]'
, f.read(), re.MULTILINE).group(1)
try:
data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())
except (TypeError, urllib.error.HTTPError, urllib.error.URLError):
self.BRAWLERS = {}
else:
if data:
self.BRAWLERS = {b['tID'].lower(): (str(b['scId'])[:2] +
'0' + str(b['scId'])[2:]) for b in data['characters'] if
b['tID']}
else:
self.BRAWLERS = {}
<|reserved_special_token_0|>
def typecasted(func):
"""Decorator that converts arguments via annotations.
Source: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11"""
signature = inspect.signature(func).parameters.items()
@wraps(func)
def wrapper(*args, **kwargs):
args = list(args)
new_args = []
new_kwargs = {}
for _, param in signature:
converter = param.annotation
if converter is inspect._empty:
converter = lambda a: a
if param.kind is param.POSITIONAL_OR_KEYWORD:
if args:
to_conv = args.pop(0)
new_args.append(converter(to_conv))
elif param.kind is param.VAR_POSITIONAL:
for a in args:
new_args.append(converter(a))
else:
for k, v in kwargs.items():
nk, nv = converter(k, v)
new_kwargs[nk] = nv
return func(*new_args, **new_kwargs)
return wrapper
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class API:
def __init__(self, base_url, version=1):
self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)
self.PROFILE = self.BASE + '/player'
self.CLUB = self.BASE + '/club'
self.LEADERBOARD = self.BASE + '/leaderboards'
self.EVENTS = self.BASE + '/events'
self.MISC = self.BASE + '/misc'
self.BATTLELOG = self.PROFILE + '/battlelog'
self.CLUB_SEARCH = self.CLUB + '/search'
self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'
path = os.path.join(os.path.dirname(__file__), os.path.pardir)
with open(os.path.join(path, '__init__.py')) as f:
self.VERSION = re.search('^__version__ = [\\\'"]([^\\\'"]*)[\\\'"]'
, f.read(), re.MULTILINE).group(1)
try:
data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())
except (TypeError, urllib.error.HTTPError, urllib.error.URLError):
self.BRAWLERS = {}
else:
if data:
self.BRAWLERS = {b['tID'].lower(): (str(b['scId'])[:2] +
'0' + str(b['scId'])[2:]) for b in data['characters'] if
b['tID']}
else:
self.BRAWLERS = {}
def bstag(tag):
tag = tag.strip('#').upper().replace('O', '0')
allowed = '0289PYLQGRJCUV'
if len(tag) < 3:
raise NotFoundError('Tag less than 3 characters.', 404)
invalid = [c for c in tag if c not in allowed]
if invalid:
raise NotFoundError(invalid, 404)
return tag
def typecasted(func):
"""Decorator that converts arguments via annotations.
Source: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11"""
signature = inspect.signature(func).parameters.items()
@wraps(func)
def wrapper(*args, **kwargs):
args = list(args)
new_args = []
new_kwargs = {}
for _, param in signature:
converter = param.annotation
if converter is inspect._empty:
converter = lambda a: a
if param.kind is param.POSITIONAL_OR_KEYWORD:
if args:
to_conv = args.pop(0)
new_args.append(converter(to_conv))
elif param.kind is param.VAR_POSITIONAL:
for a in args:
new_args.append(converter(a))
else:
for k, v in kwargs.items():
nk, nv = converter(k, v)
new_kwargs[nk] = nv
return func(*new_args, **new_kwargs)
return wrapper
<|reserved_special_token_1|>
import inspect
import json
import os
import re
import urllib.request
from functools import wraps
from ..errors import NotFoundError
class API:
def __init__(self, base_url, version=1):
self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)
self.PROFILE = self.BASE + '/player'
self.CLUB = self.BASE + '/club'
self.LEADERBOARD = self.BASE + '/leaderboards'
self.EVENTS = self.BASE + '/events'
self.MISC = self.BASE + '/misc'
self.BATTLELOG = self.PROFILE + '/battlelog'
self.CLUB_SEARCH = self.CLUB + '/search'
self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'
path = os.path.join(os.path.dirname(__file__), os.path.pardir)
with open(os.path.join(path, '__init__.py')) as f:
self.VERSION = re.search('^__version__ = [\\\'"]([^\\\'"]*)[\\\'"]'
, f.read(), re.MULTILINE).group(1)
try:
data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())
except (TypeError, urllib.error.HTTPError, urllib.error.URLError):
self.BRAWLERS = {}
else:
if data:
self.BRAWLERS = {b['tID'].lower(): (str(b['scId'])[:2] +
'0' + str(b['scId'])[2:]) for b in data['characters'] if
b['tID']}
else:
self.BRAWLERS = {}
def bstag(tag):
tag = tag.strip('#').upper().replace('O', '0')
allowed = '0289PYLQGRJCUV'
if len(tag) < 3:
raise NotFoundError('Tag less than 3 characters.', 404)
invalid = [c for c in tag if c not in allowed]
if invalid:
raise NotFoundError(invalid, 404)
return tag
def typecasted(func):
"""Decorator that converts arguments via annotations.
Source: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11"""
signature = inspect.signature(func).parameters.items()
@wraps(func)
def wrapper(*args, **kwargs):
args = list(args)
new_args = []
new_kwargs = {}
for _, param in signature:
converter = param.annotation
if converter is inspect._empty:
converter = lambda a: a
if param.kind is param.POSITIONAL_OR_KEYWORD:
if args:
to_conv = args.pop(0)
new_args.append(converter(to_conv))
elif param.kind is param.VAR_POSITIONAL:
for a in args:
new_args.append(converter(a))
else:
for k, v in kwargs.items():
nk, nv = converter(k, v)
new_kwargs[nk] = nv
return func(*new_args, **new_kwargs)
return wrapper
<|reserved_special_token_1|>
import inspect
import json
import os
import re
import urllib.request
from functools import wraps
from ..errors import NotFoundError
class API:
def __init__(self, base_url, version=1):
self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)
self.PROFILE = self.BASE + '/player'
self.CLUB = self.BASE + '/club'
self.LEADERBOARD = self.BASE + '/leaderboards'
self.EVENTS = self.BASE + '/events'
self.MISC = self.BASE + '/misc'
self.BATTLELOG = self.PROFILE + '/battlelog'
self.CLUB_SEARCH = self.CLUB + '/search'
self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'
# self.BRAWLERS = [
# 'shelly', 'nita', 'colt', 'bull', 'jessie', # league reward 0-500
# 'brock', 'dynamike', 'bo', 'tick', '8-bit' # league reward 1000+
# 'el primo', 'barley', 'poco', 'rosa', # rare
# 'rico', 'penny', 'darryl', 'carl', # super rare
# 'frank', 'pam', 'piper', 'bibi', # epic
# 'mortis', 'tara', 'gene', # mythic
# 'spike', 'crow', 'leon' # legendary
# ]
path = os.path.join(os.path.dirname(__file__), os.path.pardir)
with open(os.path.join(path, '__init__.py')) as f:
self.VERSION = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
try:
data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())
except (TypeError, urllib.error.HTTPError, urllib.error.URLError):
self.BRAWLERS = {}
else:
if data:
self.BRAWLERS = {b['tID'].lower(): str(b['scId'])[:2] + '0' + str(b['scId'])[2:] for b in data['characters'] if b['tID']}
else:
self.BRAWLERS = {}
def bstag(tag):
tag = tag.strip('#').upper().replace('O', '0')
allowed = '0289PYLQGRJCUV'
if len(tag) < 3:
raise NotFoundError('Tag less than 3 characters.', 404)
invalid = [c for c in tag if c not in allowed]
if invalid:
raise NotFoundError(invalid, 404)
return tag
def typecasted(func):
'''Decorator that converts arguments via annotations.
Source: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11'''
signature = inspect.signature(func).parameters.items()
@wraps(func)
def wrapper(*args, **kwargs):
args = list(args)
new_args = []
new_kwargs = {}
for _, param in signature:
converter = param.annotation
if converter is inspect._empty:
converter = lambda a: a # do nothing
if param.kind is param.POSITIONAL_OR_KEYWORD:
if args:
to_conv = args.pop(0)
new_args.append(converter(to_conv))
elif param.kind is param.VAR_POSITIONAL:
for a in args:
new_args.append(converter(a))
else:
for k, v in kwargs.items():
nk, nv = converter(k, v)
new_kwargs[nk] = nv
return func(*new_args, **new_kwargs)
return wrapper
|
flexible
|
{
"blob_id": "3f3db7e8813f49fe0265e110236b6dc4fed6cd1b",
"index": 7214,
"step-1": "<mask token>\n\n\nclass API:\n\n def __init__(self, base_url, version=1):\n self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)\n self.PROFILE = self.BASE + '/player'\n self.CLUB = self.BASE + '/club'\n self.LEADERBOARD = self.BASE + '/leaderboards'\n self.EVENTS = self.BASE + '/events'\n self.MISC = self.BASE + '/misc'\n self.BATTLELOG = self.PROFILE + '/battlelog'\n self.CLUB_SEARCH = self.CLUB + '/search'\n self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'\n path = os.path.join(os.path.dirname(__file__), os.path.pardir)\n with open(os.path.join(path, '__init__.py')) as f:\n self.VERSION = re.search('^__version__ = [\\\\\\'\"]([^\\\\\\'\"]*)[\\\\\\'\"]'\n , f.read(), re.MULTILINE).group(1)\n try:\n data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())\n except (TypeError, urllib.error.HTTPError, urllib.error.URLError):\n self.BRAWLERS = {}\n else:\n if data:\n self.BRAWLERS = {b['tID'].lower(): (str(b['scId'])[:2] +\n '0' + str(b['scId'])[2:]) for b in data['characters'] if\n b['tID']}\n else:\n self.BRAWLERS = {}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass API:\n\n def __init__(self, base_url, version=1):\n self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)\n self.PROFILE = self.BASE + '/player'\n self.CLUB = self.BASE + '/club'\n self.LEADERBOARD = self.BASE + '/leaderboards'\n self.EVENTS = self.BASE + '/events'\n self.MISC = self.BASE + '/misc'\n self.BATTLELOG = self.PROFILE + '/battlelog'\n self.CLUB_SEARCH = self.CLUB + '/search'\n self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'\n path = os.path.join(os.path.dirname(__file__), os.path.pardir)\n with open(os.path.join(path, '__init__.py')) as f:\n self.VERSION = re.search('^__version__ = [\\\\\\'\"]([^\\\\\\'\"]*)[\\\\\\'\"]'\n , f.read(), re.MULTILINE).group(1)\n try:\n data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())\n except (TypeError, urllib.error.HTTPError, urllib.error.URLError):\n self.BRAWLERS = {}\n else:\n if data:\n self.BRAWLERS = {b['tID'].lower(): (str(b['scId'])[:2] +\n '0' + str(b['scId'])[2:]) for b in data['characters'] if\n b['tID']}\n else:\n self.BRAWLERS = {}\n\n\n<mask token>\n\n\ndef typecasted(func):\n \"\"\"Decorator that converts arguments via annotations.\n Source: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11\"\"\"\n signature = inspect.signature(func).parameters.items()\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n args = list(args)\n new_args = []\n new_kwargs = {}\n for _, param in signature:\n converter = param.annotation\n if converter is inspect._empty:\n converter = lambda a: a\n if param.kind is param.POSITIONAL_OR_KEYWORD:\n if args:\n to_conv = args.pop(0)\n new_args.append(converter(to_conv))\n elif param.kind is param.VAR_POSITIONAL:\n for a in args:\n new_args.append(converter(a))\n else:\n for k, v in kwargs.items():\n nk, nv = converter(k, v)\n new_kwargs[nk] = nv\n return func(*new_args, **new_kwargs)\n return wrapper\n",
"step-3": "<mask token>\n\n\nclass API:\n\n def __init__(self, base_url, version=1):\n self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)\n self.PROFILE = self.BASE + '/player'\n self.CLUB = self.BASE + '/club'\n self.LEADERBOARD = self.BASE + '/leaderboards'\n self.EVENTS = self.BASE + '/events'\n self.MISC = self.BASE + '/misc'\n self.BATTLELOG = self.PROFILE + '/battlelog'\n self.CLUB_SEARCH = self.CLUB + '/search'\n self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'\n path = os.path.join(os.path.dirname(__file__), os.path.pardir)\n with open(os.path.join(path, '__init__.py')) as f:\n self.VERSION = re.search('^__version__ = [\\\\\\'\"]([^\\\\\\'\"]*)[\\\\\\'\"]'\n , f.read(), re.MULTILINE).group(1)\n try:\n data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())\n except (TypeError, urllib.error.HTTPError, urllib.error.URLError):\n self.BRAWLERS = {}\n else:\n if data:\n self.BRAWLERS = {b['tID'].lower(): (str(b['scId'])[:2] +\n '0' + str(b['scId'])[2:]) for b in data['characters'] if\n b['tID']}\n else:\n self.BRAWLERS = {}\n\n\ndef bstag(tag):\n tag = tag.strip('#').upper().replace('O', '0')\n allowed = '0289PYLQGRJCUV'\n if len(tag) < 3:\n raise NotFoundError('Tag less than 3 characters.', 404)\n invalid = [c for c in tag if c not in allowed]\n if invalid:\n raise NotFoundError(invalid, 404)\n return tag\n\n\ndef typecasted(func):\n \"\"\"Decorator that converts arguments via annotations.\n Source: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11\"\"\"\n signature = inspect.signature(func).parameters.items()\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n args = list(args)\n new_args = []\n new_kwargs = {}\n for _, param in signature:\n converter = param.annotation\n if converter is inspect._empty:\n converter = lambda a: a\n if param.kind is param.POSITIONAL_OR_KEYWORD:\n if args:\n to_conv = args.pop(0)\n new_args.append(converter(to_conv))\n elif param.kind is param.VAR_POSITIONAL:\n for a in args:\n new_args.append(converter(a))\n else:\n for k, v in kwargs.items():\n nk, nv = converter(k, v)\n new_kwargs[nk] = nv\n return func(*new_args, **new_kwargs)\n return wrapper\n",
"step-4": "import inspect\nimport json\nimport os\nimport re\nimport urllib.request\nfrom functools import wraps\nfrom ..errors import NotFoundError\n\n\nclass API:\n\n def __init__(self, base_url, version=1):\n self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)\n self.PROFILE = self.BASE + '/player'\n self.CLUB = self.BASE + '/club'\n self.LEADERBOARD = self.BASE + '/leaderboards'\n self.EVENTS = self.BASE + '/events'\n self.MISC = self.BASE + '/misc'\n self.BATTLELOG = self.PROFILE + '/battlelog'\n self.CLUB_SEARCH = self.CLUB + '/search'\n self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'\n path = os.path.join(os.path.dirname(__file__), os.path.pardir)\n with open(os.path.join(path, '__init__.py')) as f:\n self.VERSION = re.search('^__version__ = [\\\\\\'\"]([^\\\\\\'\"]*)[\\\\\\'\"]'\n , f.read(), re.MULTILINE).group(1)\n try:\n data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())\n except (TypeError, urllib.error.HTTPError, urllib.error.URLError):\n self.BRAWLERS = {}\n else:\n if data:\n self.BRAWLERS = {b['tID'].lower(): (str(b['scId'])[:2] +\n '0' + str(b['scId'])[2:]) for b in data['characters'] if\n b['tID']}\n else:\n self.BRAWLERS = {}\n\n\ndef bstag(tag):\n tag = tag.strip('#').upper().replace('O', '0')\n allowed = '0289PYLQGRJCUV'\n if len(tag) < 3:\n raise NotFoundError('Tag less than 3 characters.', 404)\n invalid = [c for c in tag if c not in allowed]\n if invalid:\n raise NotFoundError(invalid, 404)\n return tag\n\n\ndef typecasted(func):\n \"\"\"Decorator that converts arguments via annotations.\n Source: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11\"\"\"\n signature = inspect.signature(func).parameters.items()\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n args = list(args)\n new_args = []\n new_kwargs = {}\n for _, param in signature:\n converter = param.annotation\n if converter is inspect._empty:\n converter = lambda a: a\n if param.kind is param.POSITIONAL_OR_KEYWORD:\n if args:\n to_conv = args.pop(0)\n new_args.append(converter(to_conv))\n elif param.kind is param.VAR_POSITIONAL:\n for a in args:\n new_args.append(converter(a))\n else:\n for k, v in kwargs.items():\n nk, nv = converter(k, v)\n new_kwargs[nk] = nv\n return func(*new_args, **new_kwargs)\n return wrapper\n",
"step-5": "import inspect\nimport json\nimport os\nimport re\nimport urllib.request\nfrom functools import wraps\n\nfrom ..errors import NotFoundError\n\n\nclass API:\n def __init__(self, base_url, version=1):\n self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)\n self.PROFILE = self.BASE + '/player'\n self.CLUB = self.BASE + '/club'\n self.LEADERBOARD = self.BASE + '/leaderboards'\n self.EVENTS = self.BASE + '/events'\n self.MISC = self.BASE + '/misc'\n self.BATTLELOG = self.PROFILE + '/battlelog'\n self.CLUB_SEARCH = self.CLUB + '/search'\n self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'\n # self.BRAWLERS = [\n # 'shelly', 'nita', 'colt', 'bull', 'jessie', # league reward 0-500\n # 'brock', 'dynamike', 'bo', 'tick', '8-bit' # league reward 1000+\n # 'el primo', 'barley', 'poco', 'rosa', # rare\n # 'rico', 'penny', 'darryl', 'carl', # super rare\n # 'frank', 'pam', 'piper', 'bibi', # epic\n # 'mortis', 'tara', 'gene', # mythic\n # 'spike', 'crow', 'leon' # legendary\n # ]\n\n path = os.path.join(os.path.dirname(__file__), os.path.pardir)\n with open(os.path.join(path, '__init__.py')) as f:\n self.VERSION = re.search(r'^__version__ = [\\'\"]([^\\'\"]*)[\\'\"]', f.read(), re.MULTILINE).group(1)\n\n try:\n data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())\n except (TypeError, urllib.error.HTTPError, urllib.error.URLError):\n self.BRAWLERS = {}\n else:\n if data:\n self.BRAWLERS = {b['tID'].lower(): str(b['scId'])[:2] + '0' + str(b['scId'])[2:] for b in data['characters'] if b['tID']}\n else:\n self.BRAWLERS = {}\n\n\ndef bstag(tag):\n tag = tag.strip('#').upper().replace('O', '0')\n allowed = '0289PYLQGRJCUV'\n if len(tag) < 3:\n raise NotFoundError('Tag less than 3 characters.', 404)\n invalid = [c for c in tag if c not in allowed]\n if invalid:\n raise NotFoundError(invalid, 404)\n return tag\n\ndef typecasted(func):\n '''Decorator that converts arguments via annotations.\n Source: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11'''\n signature = inspect.signature(func).parameters.items()\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n args = list(args)\n new_args = []\n new_kwargs = {}\n for _, param in signature:\n converter = param.annotation\n if converter is inspect._empty:\n converter = lambda a: a # do nothing\n if param.kind is param.POSITIONAL_OR_KEYWORD:\n if args:\n to_conv = args.pop(0)\n new_args.append(converter(to_conv))\n elif param.kind is param.VAR_POSITIONAL:\n for a in args:\n new_args.append(converter(a))\n else:\n for k, v in kwargs.items():\n nk, nv = converter(k, v)\n new_kwargs[nk] = nv\n return func(*new_args, **new_kwargs)\n return wrapper\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
work_hours = 8
work_days = 5
pay_periods = 2
total = work_hours * work_days * pay_periods
rate = 17
pay = total * rate
print(pay)
# variables
name = "josh"
age = 30
# float
weight = 160.5
# list
kill_streak = [3, 5, 1, 9] # [90.9] list can contain sub lists
# range
players = list(range(1,10))
odds = list(range(1, 10, 2))
print(odds)
print(type(name), type(age), type(weight), type(kill_streak))
# dir(str)
# attributes
# help(str.upper)
# dir(__builtins__)
kill_streak_sum = sum(kill_streak)
length = len(kill_streak)
mean = kill_streak_sum / length
print(mean)
student_grades = [9.1, 8.8, 10.0, 7.7, 6.8, 8.0, 10.0, 8.1, 10.0, 9.9]
tens = student_grades.count(10)
print(tens)
# dictionary (key:value)
family = {"josh": 30, "jess": 31, "bailey": 1.5}
age_sum = sum(family.values())
family_size = len(family)
average_age = age_sum / family_size
print(average_age)
# Tuple like a dictionary but non-mutable
palette_one = ("#f1f1f1", "#333333", "#4287f5")
palette_two = ("#f5f5f5", "#454545", "#6dd46a")
palette_three = ("#f0fff0", "#c7c7c7", "#725fb0")
palettes = (palette_one, palette_two, palette_three)
color_codes = palettes
temperature_data = {"morning": (3.1, 2.0, 4.9), "noon": (1.2, 0.9, 3.4), "evening": (0.2, 0.1, 1.0)}
day_temperatures = temperature_data
|
normal
|
{
"blob_id": "af2ef3c77cefe675f3d30c3234401f0f9bda3505",
"index": 8916,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(pay)\n<mask token>\nprint(odds)\nprint(type(name), type(age), type(weight), type(kill_streak))\n<mask token>\nprint(mean)\n<mask token>\nprint(tens)\n<mask token>\nprint(average_age)\n<mask token>\n",
"step-3": "work_hours = 8\nwork_days = 5\npay_periods = 2\ntotal = work_hours * work_days * pay_periods\nrate = 17\npay = total * rate\nprint(pay)\nname = 'josh'\nage = 30\nweight = 160.5\nkill_streak = [3, 5, 1, 9]\nplayers = list(range(1, 10))\nodds = list(range(1, 10, 2))\nprint(odds)\nprint(type(name), type(age), type(weight), type(kill_streak))\nkill_streak_sum = sum(kill_streak)\nlength = len(kill_streak)\nmean = kill_streak_sum / length\nprint(mean)\nstudent_grades = [9.1, 8.8, 10.0, 7.7, 6.8, 8.0, 10.0, 8.1, 10.0, 9.9]\ntens = student_grades.count(10)\nprint(tens)\nfamily = {'josh': 30, 'jess': 31, 'bailey': 1.5}\nage_sum = sum(family.values())\nfamily_size = len(family)\naverage_age = age_sum / family_size\nprint(average_age)\npalette_one = '#f1f1f1', '#333333', '#4287f5'\npalette_two = '#f5f5f5', '#454545', '#6dd46a'\npalette_three = '#f0fff0', '#c7c7c7', '#725fb0'\npalettes = palette_one, palette_two, palette_three\ncolor_codes = palettes\ntemperature_data = {'morning': (3.1, 2.0, 4.9), 'noon': (1.2, 0.9, 3.4),\n 'evening': (0.2, 0.1, 1.0)}\nday_temperatures = temperature_data\n",
"step-4": "work_hours = 8\r\nwork_days = 5\r\npay_periods = 2\r\ntotal = work_hours * work_days * pay_periods\r\nrate = 17\r\npay = total * rate\r\n\r\nprint(pay)\r\n\r\n# variables\r\nname = \"josh\"\r\nage = 30\r\n# float\r\nweight = 160.5\r\n# list\r\nkill_streak = [3, 5, 1, 9] # [90.9] list can contain sub lists\r\n# range\r\nplayers = list(range(1,10))\r\nodds = list(range(1, 10, 2))\r\nprint(odds)\r\n\r\nprint(type(name), type(age), type(weight), type(kill_streak))\r\n\r\n# dir(str)\r\n# attributes\r\n# help(str.upper)\r\n\r\n# dir(__builtins__)\r\n\r\nkill_streak_sum = sum(kill_streak)\r\nlength = len(kill_streak)\r\nmean = kill_streak_sum / length\r\n\r\nprint(mean)\r\n\r\nstudent_grades = [9.1, 8.8, 10.0, 7.7, 6.8, 8.0, 10.0, 8.1, 10.0, 9.9]\r\ntens = student_grades.count(10)\r\n\r\nprint(tens)\r\n\r\n# dictionary (key:value)\r\nfamily = {\"josh\": 30, \"jess\": 31, \"bailey\": 1.5}\r\nage_sum = sum(family.values())\r\nfamily_size = len(family)\r\naverage_age = age_sum / family_size\r\n\r\nprint(average_age)\r\n\r\n# Tuple like a dictionary but non-mutable\r\npalette_one = (\"#f1f1f1\", \"#333333\", \"#4287f5\")\r\npalette_two = (\"#f5f5f5\", \"#454545\", \"#6dd46a\")\r\npalette_three = (\"#f0fff0\", \"#c7c7c7\", \"#725fb0\")\r\npalettes = (palette_one, palette_two, palette_three)\r\n\r\ncolor_codes = palettes\r\n\r\ntemperature_data = {\"morning\": (3.1, 2.0, 4.9), \"noon\": (1.2, 0.9, 3.4), \"evening\": (0.2, 0.1, 1.0)}\r\nday_temperatures = temperature_data\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Solution:
def maximumTime(self, time: str) ->str:
ans = ''
for i in range(5):
if time[i] != '?':
ans += time[i]
continue
if i == 0:
if time[1] in ['0', '1', '2', '3', '?']:
ans += '2'
else:
ans += '1'
elif i == 1:
if ans[0] == '1' or ans[0] == '0':
ans += '9'
else:
ans += '3'
elif i == 3:
ans += '5'
elif i == 4:
ans += '9'
return ans
|
normal
|
{
"blob_id": "e7494104ab98df2b640f710fa69584802b3e1259",
"index": 3032,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def maximumTime(self, time: str) ->str:\n ans = ''\n for i in range(5):\n if time[i] != '?':\n ans += time[i]\n continue\n if i == 0:\n if time[1] in ['0', '1', '2', '3', '?']:\n ans += '2'\n else:\n ans += '1'\n elif i == 1:\n if ans[0] == '1' or ans[0] == '0':\n ans += '9'\n else:\n ans += '3'\n elif i == 3:\n ans += '5'\n elif i == 4:\n ans += '9'\n return ans\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
import numpy as np
from mpi4py import MPI
from parutils import pprint
comm = MPI.COMM_WORLD
pprint("-"*78)
pprint(" Running on %d cores" % comm.size)
pprint("-"*78)
comm.Barrier()
# Prepare a vector of N=5 elements to be broadcasted...
N = 5
if comm.rank == 0:
A = np.arange(N, dtype=np.float64) # rank 0 has proper data
else:
    A = np.zeros(N, dtype=np.float64) # other ranks start with zeros
print("rank {0}: {1}".format(comm.rank, A))
comm.Barrier()
# Broadcast A from rank 0 to everybody
comm.Bcast( [A, MPI.DOUBLE], root=0)
# Everybody should now have the same...
print("[%02d] %s" % (comm.rank, A))
|
normal
|
{
"blob_id": "839b3ebffebce95de25f75edc67a647bd1318268",
"index": 5077,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npprint('-' * 78)\npprint(' Running on %d cores' % comm.size)\npprint('-' * 78)\ncomm.Barrier()\n<mask token>\nif comm.rank == 0:\n A = np.arange(N, dtype=np.float64)\nelse:\n A = np.zeros(N, dtype=np.float64)\nprint('rank {0}: {1}'.format(comm.rank, A))\ncomm.Barrier()\ncomm.Bcast([A, MPI.DOUBLE], root=0)\nprint('[%02d] %s' % (comm.rank, A))\n",
"step-3": "<mask token>\ncomm = MPI.COMM_WORLD\npprint('-' * 78)\npprint(' Running on %d cores' % comm.size)\npprint('-' * 78)\ncomm.Barrier()\nN = 5\nif comm.rank == 0:\n A = np.arange(N, dtype=np.float64)\nelse:\n A = np.zeros(N, dtype=np.float64)\nprint('rank {0}: {1}'.format(comm.rank, A))\ncomm.Barrier()\ncomm.Bcast([A, MPI.DOUBLE], root=0)\nprint('[%02d] %s' % (comm.rank, A))\n",
"step-4": "from __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nfrom mpi4py import MPI\nfrom parutils import pprint\ncomm = MPI.COMM_WORLD\npprint('-' * 78)\npprint(' Running on %d cores' % comm.size)\npprint('-' * 78)\ncomm.Barrier()\nN = 5\nif comm.rank == 0:\n A = np.arange(N, dtype=np.float64)\nelse:\n A = np.zeros(N, dtype=np.float64)\nprint('rank {0}: {1}'.format(comm.rank, A))\ncomm.Barrier()\ncomm.Bcast([A, MPI.DOUBLE], root=0)\nprint('[%02d] %s' % (comm.rank, A))\n",
"step-5": "#!/usr/bin/env python\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom mpi4py import MPI\n\nfrom parutils import pprint\n\ncomm = MPI.COMM_WORLD\n\npprint(\"-\"*78)\npprint(\" Running on %d cores\" % comm.size)\npprint(\"-\"*78)\n\ncomm.Barrier()\n\n# Prepare a vector of N=5 elements to be broadcasted...\nN = 5\nif comm.rank == 0:\n A = np.arange(N, dtype=np.float64) # rank 0 has proper data\nelse:\n A = np.zeros(N, dtype=np.float64) # rank 0 has proper data\nprint(\"rank {0}: {1}\".format(comm.rank, A))\ncomm.Barrier()\n# Broadcast A from rank 0 to everybody\ncomm.Bcast( [A, MPI.DOUBLE], root=0)\n\n# Everybody should now have the same...\nprint(\"[%02d] %s\" % (comm.rank, A))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys, os
class Extractor:
def __init__(self, prefix=''):
self.variables = {}
self.prefix = os.path.basename(prefix)
'''
Returns the variable name if a variable with
the value <value> is found.
'''
def find_variable_name(self, value):
for var, val in self.variables.items():
if value == val:
return var
'''
Scans a list of <lines> containing CSS and
returns a list of strings containing the
rendered LESS version.
'''
def scan(self, lines):
yield "@import '%s_variables.less'\n\n" %self.prefix
for line in lines:
found_prop = False
for prop in ('background-color', 'background', 'color'):
if prop in line:
found_prop = True
value = line.split(':')[1].strip().replace('}', '')
if not (value in self.variables.values()):
self.variables['@var%i' %(len(self.variables) + 1)] = value
yield line.replace(value, self.find_variable_name(value) + ';')
if not found_prop:
yield line
'''
Returns the output for the variables.less
file as a list of strings
'''
def get_variables(self):
for var, val in self.variables.items():
yield var + ': ' + val
if __name__ == '__main__':
if len(sys.argv) > 1:
for path in sys.argv[1:]:
name = '.'.join(path.split('.')[:-1])
extractor = Extractor(name)
read = open(path)
write = open(name + '.less', 'w')
variables = open(name + '_variables.less', 'w')
try:
for line in extractor.scan(read.readlines()):
write.write(line)
for line in extractor.get_variables():
variables.write(line + os.linesep)
finally:
variables.close()
write.close()
read.close()
else:
print('usage: python extract.py [file]')
|
normal
|
{
"blob_id": "dffcaf47ec8e0daa940e7047f11681ef3eabc772",
"index": 8591,
"step-1": "<mask token>\n\n\nclass Extractor:\n\n def __init__(self, prefix=''):\n self.variables = {}\n self.prefix = os.path.basename(prefix)\n <mask token>\n\n def find_variable_name(self, value):\n for var, val in self.variables.items():\n if value == val:\n return var\n <mask token>\n\n def scan(self, lines):\n yield \"@import '%s_variables.less'\\n\\n\" % self.prefix\n for line in lines:\n found_prop = False\n for prop in ('background-color', 'background', 'color'):\n if prop in line:\n found_prop = True\n value = line.split(':')[1].strip().replace('}', '')\n if not value in self.variables.values():\n self.variables['@var%i' % (len(self.variables) + 1)\n ] = value\n yield line.replace(value, self.find_variable_name(value\n ) + ';')\n if not found_prop:\n yield line\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Extractor:\n\n def __init__(self, prefix=''):\n self.variables = {}\n self.prefix = os.path.basename(prefix)\n \"\"\"\n Returns the variable name if a variable with\n the value <value> is found.\n \"\"\"\n\n def find_variable_name(self, value):\n for var, val in self.variables.items():\n if value == val:\n return var\n \"\"\"\n Scans a list of <lines> containing CSS and\n returns a list of strings containing the\n rendered LESS version.\n \"\"\"\n\n def scan(self, lines):\n yield \"@import '%s_variables.less'\\n\\n\" % self.prefix\n for line in lines:\n found_prop = False\n for prop in ('background-color', 'background', 'color'):\n if prop in line:\n found_prop = True\n value = line.split(':')[1].strip().replace('}', '')\n if not value in self.variables.values():\n self.variables['@var%i' % (len(self.variables) + 1)\n ] = value\n yield line.replace(value, self.find_variable_name(value\n ) + ';')\n if not found_prop:\n yield line\n \"\"\"\n Returns the output for the variables.less\n file as a list of strings\n \"\"\"\n\n def get_variables(self):\n for var, val in self.variables.items():\n yield var + ': ' + val\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Extractor:\n\n def __init__(self, prefix=''):\n self.variables = {}\n self.prefix = os.path.basename(prefix)\n \"\"\"\n Returns the variable name if a variable with\n the value <value> is found.\n \"\"\"\n\n def find_variable_name(self, value):\n for var, val in self.variables.items():\n if value == val:\n return var\n \"\"\"\n Scans a list of <lines> containing CSS and\n returns a list of strings containing the\n rendered LESS version.\n \"\"\"\n\n def scan(self, lines):\n yield \"@import '%s_variables.less'\\n\\n\" % self.prefix\n for line in lines:\n found_prop = False\n for prop in ('background-color', 'background', 'color'):\n if prop in line:\n found_prop = True\n value = line.split(':')[1].strip().replace('}', '')\n if not value in self.variables.values():\n self.variables['@var%i' % (len(self.variables) + 1)\n ] = value\n yield line.replace(value, self.find_variable_name(value\n ) + ';')\n if not found_prop:\n yield line\n \"\"\"\n Returns the output for the variables.less\n file as a list of strings\n \"\"\"\n\n def get_variables(self):\n for var, val in self.variables.items():\n yield var + ': ' + val\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n for path in sys.argv[1:]:\n name = '.'.join(path.split('.')[:-1])\n extractor = Extractor(name)\n read = open(path)\n write = open(name + '.less', 'w')\n variables = open(name + '_variables.less', 'w')\n try:\n for line in extractor.scan(read.readlines()):\n write.write(line)\n for line in extractor.get_variables():\n variables.write(line + os.linesep)\n finally:\n variables.close()\n write.close()\n read.close()\n else:\n print('usage: python extract.py [file]')\n",
"step-4": "import sys, os\n\n\nclass Extractor:\n\n def __init__(self, prefix=''):\n self.variables = {}\n self.prefix = os.path.basename(prefix)\n \"\"\"\n Returns the variable name if a variable with\n the value <value> is found.\n \"\"\"\n\n def find_variable_name(self, value):\n for var, val in self.variables.items():\n if value == val:\n return var\n \"\"\"\n Scans a list of <lines> containing CSS and\n returns a list of strings containing the\n rendered LESS version.\n \"\"\"\n\n def scan(self, lines):\n yield \"@import '%s_variables.less'\\n\\n\" % self.prefix\n for line in lines:\n found_prop = False\n for prop in ('background-color', 'background', 'color'):\n if prop in line:\n found_prop = True\n value = line.split(':')[1].strip().replace('}', '')\n if not value in self.variables.values():\n self.variables['@var%i' % (len(self.variables) + 1)\n ] = value\n yield line.replace(value, self.find_variable_name(value\n ) + ';')\n if not found_prop:\n yield line\n \"\"\"\n Returns the output for the variables.less\n file as a list of strings\n \"\"\"\n\n def get_variables(self):\n for var, val in self.variables.items():\n yield var + ': ' + val\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n for path in sys.argv[1:]:\n name = '.'.join(path.split('.')[:-1])\n extractor = Extractor(name)\n read = open(path)\n write = open(name + '.less', 'w')\n variables = open(name + '_variables.less', 'w')\n try:\n for line in extractor.scan(read.readlines()):\n write.write(line)\n for line in extractor.get_variables():\n variables.write(line + os.linesep)\n finally:\n variables.close()\n write.close()\n read.close()\n else:\n print('usage: python extract.py [file]')\n",
"step-5": "import sys, os\n\nclass Extractor:\n def __init__(self, prefix=''):\n self.variables = {}\n self.prefix = os.path.basename(prefix)\n \n '''\n Returns the variable name if a variable with\n the value <value> is found.\n '''\n def find_variable_name(self, value):\n for var, val in self.variables.items():\n if value == val:\n return var\n \n '''\n Scans a list of <lines> containing CSS and\n returns a list of strings containing the\n rendered LESS version.\n '''\n def scan(self, lines):\n yield \"@import '%s_variables.less'\\n\\n\" %self.prefix\n for line in lines:\n found_prop = False\n for prop in ('background-color', 'background', 'color'):\n if prop in line:\n found_prop = True\n value = line.split(':')[1].strip().replace('}', '')\n if not (value in self.variables.values()):\n self.variables['@var%i' %(len(self.variables) + 1)] = value\n yield line.replace(value, self.find_variable_name(value) + ';')\n if not found_prop:\n yield line\n\n '''\n Returns the output for the variables.less\n file as a list of strings\n '''\n def get_variables(self):\n for var, val in self.variables.items():\n yield var + ': ' + val \n \n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n for path in sys.argv[1:]:\n name = '.'.join(path.split('.')[:-1])\n extractor = Extractor(name)\n read = open(path)\n write = open(name + '.less', 'w')\n variables = open(name + '_variables.less', 'w')\n try:\n for line in extractor.scan(read.readlines()):\n write.write(line)\n for line in extractor.get_variables():\n variables.write(line + os.linesep)\n finally:\n variables.close()\n write.close()\n read.close() \n \n else:\n print('usage: python extract.py [file]')\n \n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parser_stop(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
stdout = result['stdout']
"""
        stdout: lines are separated
"""
data = stdout.split('\n')
result['data'] = data[0]
return result
return wrapper
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from functools import wraps
def parser_stop(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
stdout = result['stdout']
"""
        stdout: lines are separated
"""
data = stdout.split('\n')
result['data'] = data[0]
return result
return wrapper
<|reserved_special_token_1|>
"""
The parser_stop decorator parses the output of the docker stop command.
"""
from functools import wraps
def parser_stop(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
stdout = result['stdout']
"""
        stdout: lines are separated by \n
"""
data = stdout.split('\n')
result['data'] = data[0]
return result
return wrapper
|
flexible
|
{
"blob_id": "4af573fa17f86ee067b870dce1f6ee482d1b14ff",
"index": 8281,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parser_stop(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n stdout = result['stdout']\n \"\"\"\n stdout: строки разделены \n\n \"\"\"\n data = stdout.split('\\n')\n result['data'] = data[0]\n return result\n return wrapper\n",
"step-3": "<mask token>\nfrom functools import wraps\n\n\ndef parser_stop(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n stdout = result['stdout']\n \"\"\"\n stdout: строки разделены \n\n \"\"\"\n data = stdout.split('\\n')\n result['data'] = data[0]\n return result\n return wrapper\n",
"step-4": "\"\"\"\nДекоратор parser_stop - парсер результата вывода комманды docker stop.\n\"\"\"\n\nfrom functools import wraps\n\n\ndef parser_stop(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n stdout = result['stdout']\n\n \"\"\"\n stdout: строки разделены \\n\n \"\"\"\n\n data = stdout.split('\\n')\n result['data'] = data[0]\n\n return result\n\n return wrapper\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import turtle
from turtle import color
import random
screen = turtle.Screen()
screen.setup(width=500, height=400)
colours = ["red", "pink", "blue", "purple", "black", "green"]
y_pos = [100, 60, 20, -20, -60, -100]
user_bet = screen.textinput(title="Make your bet",
prompt="Which turtle will win? Choose a colour: ")
is_race_on = False
all_racers = []
class Racer(turtle.Turtle):
# def __init__(self, color, x, y):
def __init__(self, color, x, y):
super().__init__(shape="turtle")
self.color(color)
self.penup()
self.goto(x=x, y=y)
def race(self):
self.forward(random.randint(0, 10))
for i in range(0, 6):
racer = Racer(colours[i], -230, y_pos[i])
all_racers.append(racer)
if user_bet:
is_race_on = True
while is_race_on:
for racer in all_racers:
if racer.xcor() > 230:
is_race_on = False
winning_colour = racer.pencolor()
if winning_colour == user_bet:
print(
f"You won! The winning turtle colour was {winning_colour}.")
else:
print(
f"You lost! The winning turtle colour was {winning_colour}.")
racer.race()
screen.exitonclick()
|
normal
|
{
"blob_id": "f3aaa6ae7a9a57946bdb035a4d52e84541c1a292",
"index": 5934,
"step-1": "<mask token>\n\n\nclass Racer(turtle.Turtle):\n\n def __init__(self, color, x, y):\n super().__init__(shape='turtle')\n self.color(color)\n self.penup()\n self.goto(x=x, y=y)\n\n def race(self):\n self.forward(random.randint(0, 10))\n\n\n<mask token>\n",
"step-2": "<mask token>\nscreen.setup(width=500, height=400)\n<mask token>\n\n\nclass Racer(turtle.Turtle):\n\n def __init__(self, color, x, y):\n super().__init__(shape='turtle')\n self.color(color)\n self.penup()\n self.goto(x=x, y=y)\n\n def race(self):\n self.forward(random.randint(0, 10))\n\n\nfor i in range(0, 6):\n racer = Racer(colours[i], -230, y_pos[i])\n all_racers.append(racer)\nif user_bet:\n is_race_on = True\nwhile is_race_on:\n for racer in all_racers:\n if racer.xcor() > 230:\n is_race_on = False\n winning_colour = racer.pencolor()\n if winning_colour == user_bet:\n print(\n f'You won! The winning turtle colour was {winning_colour}.'\n )\n else:\n print(\n f'You lost! The winning turtle colour was {winning_colour}.'\n )\n racer.race()\nscreen.exitonclick()\n",
"step-3": "<mask token>\nscreen = turtle.Screen()\nscreen.setup(width=500, height=400)\ncolours = ['red', 'pink', 'blue', 'purple', 'black', 'green']\ny_pos = [100, 60, 20, -20, -60, -100]\nuser_bet = screen.textinput(title='Make your bet', prompt=\n 'Which turtle will win? Choose a colour: ')\nis_race_on = False\nall_racers = []\n\n\nclass Racer(turtle.Turtle):\n\n def __init__(self, color, x, y):\n super().__init__(shape='turtle')\n self.color(color)\n self.penup()\n self.goto(x=x, y=y)\n\n def race(self):\n self.forward(random.randint(0, 10))\n\n\nfor i in range(0, 6):\n racer = Racer(colours[i], -230, y_pos[i])\n all_racers.append(racer)\nif user_bet:\n is_race_on = True\nwhile is_race_on:\n for racer in all_racers:\n if racer.xcor() > 230:\n is_race_on = False\n winning_colour = racer.pencolor()\n if winning_colour == user_bet:\n print(\n f'You won! The winning turtle colour was {winning_colour}.'\n )\n else:\n print(\n f'You lost! The winning turtle colour was {winning_colour}.'\n )\n racer.race()\nscreen.exitonclick()\n",
"step-4": "import turtle\nfrom turtle import color\nimport random\nscreen = turtle.Screen()\nscreen.setup(width=500, height=400)\ncolours = ['red', 'pink', 'blue', 'purple', 'black', 'green']\ny_pos = [100, 60, 20, -20, -60, -100]\nuser_bet = screen.textinput(title='Make your bet', prompt=\n 'Which turtle will win? Choose a colour: ')\nis_race_on = False\nall_racers = []\n\n\nclass Racer(turtle.Turtle):\n\n def __init__(self, color, x, y):\n super().__init__(shape='turtle')\n self.color(color)\n self.penup()\n self.goto(x=x, y=y)\n\n def race(self):\n self.forward(random.randint(0, 10))\n\n\nfor i in range(0, 6):\n racer = Racer(colours[i], -230, y_pos[i])\n all_racers.append(racer)\nif user_bet:\n is_race_on = True\nwhile is_race_on:\n for racer in all_racers:\n if racer.xcor() > 230:\n is_race_on = False\n winning_colour = racer.pencolor()\n if winning_colour == user_bet:\n print(\n f'You won! The winning turtle colour was {winning_colour}.'\n )\n else:\n print(\n f'You lost! The winning turtle colour was {winning_colour}.'\n )\n racer.race()\nscreen.exitonclick()\n",
"step-5": "import turtle\nfrom turtle import color\nimport random\n\nscreen = turtle.Screen()\nscreen.setup(width=500, height=400)\ncolours = [\"red\", \"pink\", \"blue\", \"purple\", \"black\", \"green\"]\ny_pos = [100, 60, 20, -20, -60, -100]\nuser_bet = screen.textinput(title=\"Make your bet\",\n prompt=\"Which turtle will win? Choose a colour: \")\nis_race_on = False\nall_racers = []\n\n\nclass Racer(turtle.Turtle):\n # def __init__(self, color, x, y):\n def __init__(self, color, x, y):\n super().__init__(shape=\"turtle\")\n self.color(color)\n self.penup()\n self.goto(x=x, y=y)\n\n def race(self):\n self.forward(random.randint(0, 10))\n\n\nfor i in range(0, 6):\n racer = Racer(colours[i], -230, y_pos[i])\n all_racers.append(racer)\n\nif user_bet:\n is_race_on = True\n\nwhile is_race_on:\n for racer in all_racers:\n if racer.xcor() > 230:\n is_race_on = False\n winning_colour = racer.pencolor()\n if winning_colour == user_bet:\n print(\n f\"You won! The winning turtle colour was {winning_colour}.\")\n else:\n print(\n f\"You lost! The winning turtle colour was {winning_colour}.\")\n racer.race()\n\nscreen.exitonclick()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ctypes.cdll.LoadLibrary(so_filepath)
<|reserved_special_token_0|>
print('The sum of %.1f and %.1f is %.1f' % (x, y, a))
<|reserved_special_token_0|>
print('Subtracting %.1f from %.1f is %.1f' % (x, y, b))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__appname__ = 'myccalc.py'
__author__ = 'Joseph Palmer <[email protected]>'
__version__ = '0.0.1'
__license__ = 'License for this code/'
__date__ = 'Dec-2018'
<|reserved_special_token_0|>
so_filepath = '{}/libmycalc.so'.format(os.getcwd())
ctypes.cdll.LoadLibrary(so_filepath)
myccalc = ctypes.CDLL(so_filepath)
add_floats = myccalc.add_floats
add_floats.argtypes = [ctypes.c_float, ctypes.c_float]
add_floats.restype = ctypes.c_float
x = 1.2
y = 3.3
a = add_floats(x, y)
print('The sum of %.1f and %.1f is %.1f' % (x, y, a))
sf = myccalc.subtract_floats
sf.argtypes = [ctypes.c_float, ctypes.c_float]
sf.restype = ctypes.c_float
b = sf(y, x)
print('Subtracting %.1f from %.1f is %.1f' % (x, y, b))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__appname__ = 'myccalc.py'
__author__ = 'Joseph Palmer <[email protected]>'
__version__ = '0.0.1'
__license__ = 'License for this code/'
__date__ = 'Dec-2018'
import os
import ctypes
so_filepath = '{}/libmycalc.so'.format(os.getcwd())
ctypes.cdll.LoadLibrary(so_filepath)
myccalc = ctypes.CDLL(so_filepath)
add_floats = myccalc.add_floats
add_floats.argtypes = [ctypes.c_float, ctypes.c_float]
add_floats.restype = ctypes.c_float
x = 1.2
y = 3.3
a = add_floats(x, y)
print('The sum of %.1f and %.1f is %.1f' % (x, y, a))
sf = myccalc.subtract_floats
sf.argtypes = [ctypes.c_float, ctypes.c_float]
sf.restype = ctypes.c_float
b = sf(y, x)
print('Subtracting %.1f from %.1f is %.1f' % (x, y, b))
<|reserved_special_token_1|>
#!/usr/bin/env python3
"""Shows how to call C code from python"""
__appname__ = "myccalc.py"
__author__ = "Joseph Palmer <[email protected]>"
__version__ = "0.0.1"
__license__ = "License for this code/"
__date__ = "Dec-2018"
## imports ##
import os
import ctypes
# Load the C library into python - needs the full path for some reason!
so_filepath = "{}/libmycalc.so".format(os.getcwd())
ctypes.cdll.LoadLibrary(so_filepath)
myccalc = ctypes.CDLL(so_filepath)
# make a simpler name for the mycalc.add_floats
add_floats = myccalc.add_floats
# tell python what variables this function takes & returns
add_floats.argtypes = [ctypes.c_float, ctypes.c_float]
add_floats.restype = ctypes.c_float
# the function can now be used
x = 1.2
y = 3.3
a = add_floats(x, y)
print("The sum of %.1f and %.1f is %.1f" % (x, y, a))
# we can do the same for others
sf = myccalc.subtract_floats
sf.argtypes = [ctypes.c_float, ctypes.c_float]
sf.restype = ctypes.c_float
b = sf(y, x)
print("Subtracting %.1f from %.1f is %.1f" % (x, y, b))
|
flexible
|
{
"blob_id": "12ecfd2750f79fd19355665b6e57c2103a3cac3e",
"index": 4257,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nctypes.cdll.LoadLibrary(so_filepath)\n<mask token>\nprint('The sum of %.1f and %.1f is %.1f' % (x, y, a))\n<mask token>\nprint('Subtracting %.1f from %.1f is %.1f' % (x, y, b))\n",
"step-3": "<mask token>\n__appname__ = 'myccalc.py'\n__author__ = 'Joseph Palmer <[email protected]>'\n__version__ = '0.0.1'\n__license__ = 'License for this code/'\n__date__ = 'Dec-2018'\n<mask token>\nso_filepath = '{}/libmycalc.so'.format(os.getcwd())\nctypes.cdll.LoadLibrary(so_filepath)\nmyccalc = ctypes.CDLL(so_filepath)\nadd_floats = myccalc.add_floats\nadd_floats.argtypes = [ctypes.c_float, ctypes.c_float]\nadd_floats.restype = ctypes.c_float\nx = 1.2\ny = 3.3\na = add_floats(x, y)\nprint('The sum of %.1f and %.1f is %.1f' % (x, y, a))\nsf = myccalc.subtract_floats\nsf.argtypes = [ctypes.c_float, ctypes.c_float]\nsf.restype = ctypes.c_float\nb = sf(y, x)\nprint('Subtracting %.1f from %.1f is %.1f' % (x, y, b))\n",
"step-4": "<mask token>\n__appname__ = 'myccalc.py'\n__author__ = 'Joseph Palmer <[email protected]>'\n__version__ = '0.0.1'\n__license__ = 'License for this code/'\n__date__ = 'Dec-2018'\nimport os\nimport ctypes\nso_filepath = '{}/libmycalc.so'.format(os.getcwd())\nctypes.cdll.LoadLibrary(so_filepath)\nmyccalc = ctypes.CDLL(so_filepath)\nadd_floats = myccalc.add_floats\nadd_floats.argtypes = [ctypes.c_float, ctypes.c_float]\nadd_floats.restype = ctypes.c_float\nx = 1.2\ny = 3.3\na = add_floats(x, y)\nprint('The sum of %.1f and %.1f is %.1f' % (x, y, a))\nsf = myccalc.subtract_floats\nsf.argtypes = [ctypes.c_float, ctypes.c_float]\nsf.restype = ctypes.c_float\nb = sf(y, x)\nprint('Subtracting %.1f from %.1f is %.1f' % (x, y, b))\n",
"step-5": "#!/usr/bin/env python3\n\"\"\"Shows how to call C code from python\"\"\"\n__appname__ = \"myccalc.py\"\n__author__ = \"Joseph Palmer <[email protected]>\"\n__version__ = \"0.0.1\"\n__license__ = \"License for this code/\"\n__date__ = \"Dec-2018\"\n\n## imports ##\nimport os\nimport ctypes\n\n# Load the C library into python - needs the full path for some reason!\nso_filepath = \"{}/libmycalc.so\".format(os.getcwd())\nctypes.cdll.LoadLibrary(so_filepath)\nmyccalc = ctypes.CDLL(so_filepath)\n\n# make a simpler name for the mycalc.add_floats\nadd_floats = myccalc.add_floats\n\n# tell python what variables this function takes & returns\nadd_floats.argtypes = [ctypes.c_float, ctypes.c_float]\nadd_floats.restype = ctypes.c_float\n\n# the function can now be used\nx = 1.2\ny = 3.3\na = add_floats(x, y)\nprint(\"The sum of %.1f and %.1f is %.1f\" % (x, y, a))\n\n# we can do the same for others\nsf = myccalc.subtract_floats\nsf.argtypes = [ctypes.c_float, ctypes.c_float]\nsf.restype = ctypes.c_float\nb = sf(y, x)\nprint(\"Subtracting %.1f from %.1f is %.1f\" % (x, y, b))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy
import matplotlib.pyplot as plt
numpy.random.seed(2)
# create datasets
x = numpy.random.normal(3, 1, 100)
y = numpy.random.normal(150, 40, 100) / x
# displaying original dataset
plt.scatter(x, y)
plt.title("Original dataset")
plt.xlabel("Minutes")
plt.ylabel("Spent money")
plt.show()
# train dataset will be 80% of the data
train_x = x[:80]
train_y = y[:80]
# test dataset will be remaining 20% of the data
test_x = x[80:]
test_y = y[80:]
# displaying train dataset
plt.scatter(train_x, train_y)
plt.title("Train dataset")
plt.xlabel("Minutes")
plt.ylabel("Spent money")
plt.show()
# displaying test dataset
plt.scatter(test_x, test_y)
plt.title("Test dataset")
plt.xlabel("Minutes")
plt.ylabel("Spent money")
plt.show()
|
normal
|
{
"blob_id": "9fd985e9675514f6c8f3ac5b91962eb744e0e82c",
"index": 6514,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnumpy.random.seed(2)\n<mask token>\nplt.scatter(x, y)\nplt.title('Original dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\n<mask token>\nplt.scatter(train_x, train_y)\nplt.title('Train dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\nplt.scatter(test_x, test_y)\nplt.title('Test dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\n",
"step-3": "<mask token>\nnumpy.random.seed(2)\nx = numpy.random.normal(3, 1, 100)\ny = numpy.random.normal(150, 40, 100) / x\nplt.scatter(x, y)\nplt.title('Original dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\ntrain_x = x[:80]\ntrain_y = y[:80]\ntest_x = x[80:]\ntest_y = y[80:]\nplt.scatter(train_x, train_y)\nplt.title('Train dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\nplt.scatter(test_x, test_y)\nplt.title('Test dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\n",
"step-4": "import numpy\nimport matplotlib.pyplot as plt\nnumpy.random.seed(2)\nx = numpy.random.normal(3, 1, 100)\ny = numpy.random.normal(150, 40, 100) / x\nplt.scatter(x, y)\nplt.title('Original dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\ntrain_x = x[:80]\ntrain_y = y[:80]\ntest_x = x[80:]\ntest_y = y[80:]\nplt.scatter(train_x, train_y)\nplt.title('Train dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\nplt.scatter(test_x, test_y)\nplt.title('Test dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\n",
"step-5": "import numpy\nimport matplotlib.pyplot as plt\n\nnumpy.random.seed(2)\n\n# create datasets\nx = numpy.random.normal(3, 1, 100)\ny = numpy.random.normal(150, 40, 100) / x\n\n# displaying original dataset\nplt.scatter(x, y)\nplt.title(\"Original dataset\")\nplt.xlabel(\"Minutes\")\nplt.ylabel(\"Spent money\")\nplt.show()\n\n# train dataset will be 80% of the data\ntrain_x = x[:80]\ntrain_y = y[:80]\n\n# test dataset will be remaining 20% of the data\ntest_x = x[80:]\ntest_y = y[80:]\n\n# displaying train dataset\nplt.scatter(train_x, train_y)\nplt.title(\"Train dataset\")\nplt.xlabel(\"Minutes\")\nplt.ylabel(\"Spent money\")\nplt.show()\n\n# displaying test dataset\nplt.scatter(test_x, test_y)\nplt.title(\"Test dataset\")\nplt.xlabel(\"Minutes\")\nplt.ylabel(\"Spent money\")\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCommon(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCommon(TestCase):
def test_get_method_config(self):
job = create_test_job(predictive_model=create_test_predictive_model
(predictive_model=PredictiveModels.CLASSIFICATION.value,
prediction_method=ClassificationMethods.RANDOM_FOREST.value))
method, config = get_method_config(job)
self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)
self.assertEqual({'max_depth': None, 'max_features': 'auto',
'n_estimators': 10}, config)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from django.test import TestCase
from src.core.common import get_method_config
from src.predictive_model.classification.models import ClassificationMethods
from src.predictive_model.models import PredictiveModels
from src.utils.tests_utils import create_test_job, create_test_predictive_model
class TestCommon(TestCase):
def test_get_method_config(self):
job = create_test_job(predictive_model=create_test_predictive_model
(predictive_model=PredictiveModels.CLASSIFICATION.value,
prediction_method=ClassificationMethods.RANDOM_FOREST.value))
method, config = get_method_config(job)
self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)
self.assertEqual({'max_depth': None, 'max_features': 'auto',
'n_estimators': 10}, config)
<|reserved_special_token_1|>
"""
common tests
"""
from django.test import TestCase
from src.core.common import get_method_config
from src.predictive_model.classification.models import ClassificationMethods
from src.predictive_model.models import PredictiveModels
from src.utils.tests_utils import create_test_job, create_test_predictive_model
class TestCommon(TestCase):
def test_get_method_config(self):
job = create_test_job(
predictive_model=create_test_predictive_model(
predictive_model=PredictiveModels.CLASSIFICATION.value,
prediction_method=ClassificationMethods.RANDOM_FOREST.value
)
)
method, config = get_method_config(job)
self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)
self.assertEqual({
'max_depth': None,
'max_features': 'auto',
'n_estimators': 10,
}, config)
|
flexible
|
{
"blob_id": "824038a56e8aaf4adf6ec813a5728ab318547582",
"index": 1638,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCommon(TestCase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCommon(TestCase):\n\n def test_get_method_config(self):\n job = create_test_job(predictive_model=create_test_predictive_model\n (predictive_model=PredictiveModels.CLASSIFICATION.value,\n prediction_method=ClassificationMethods.RANDOM_FOREST.value))\n method, config = get_method_config(job)\n self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)\n self.assertEqual({'max_depth': None, 'max_features': 'auto',\n 'n_estimators': 10}, config)\n",
"step-4": "<mask token>\nfrom django.test import TestCase\nfrom src.core.common import get_method_config\nfrom src.predictive_model.classification.models import ClassificationMethods\nfrom src.predictive_model.models import PredictiveModels\nfrom src.utils.tests_utils import create_test_job, create_test_predictive_model\n\n\nclass TestCommon(TestCase):\n\n def test_get_method_config(self):\n job = create_test_job(predictive_model=create_test_predictive_model\n (predictive_model=PredictiveModels.CLASSIFICATION.value,\n prediction_method=ClassificationMethods.RANDOM_FOREST.value))\n method, config = get_method_config(job)\n self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)\n self.assertEqual({'max_depth': None, 'max_features': 'auto',\n 'n_estimators': 10}, config)\n",
"step-5": "\"\"\"\ncommon tests\n\"\"\"\n\nfrom django.test import TestCase\n\nfrom src.core.common import get_method_config\nfrom src.predictive_model.classification.models import ClassificationMethods\nfrom src.predictive_model.models import PredictiveModels\nfrom src.utils.tests_utils import create_test_job, create_test_predictive_model\n\n\nclass TestCommon(TestCase):\n def test_get_method_config(self):\n job = create_test_job(\n predictive_model=create_test_predictive_model(\n predictive_model=PredictiveModels.CLASSIFICATION.value,\n prediction_method=ClassificationMethods.RANDOM_FOREST.value\n )\n )\n\n method, config = get_method_config(job)\n\n self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)\n self.assertEqual({\n 'max_depth': None,\n 'max_features': 'auto',\n 'n_estimators': 10,\n }, config)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for Images in Faces:
lv_FaceId = Images['FaceId']
lv_ImageId = Images['ImageId']
    lv_ExternalImageId = Images['ExternalImageId']
    lv_Names = lv_ExternalImageId.split('_')
lv_Firstname = lv_Names[0]
lv_Surname = lv_Names[1]
print('FaceId %s' % lv_FaceId)
print('ImageId %s' % lv_ImageId)
print('ExternalImageId %s' % lv_ExternalImageId)
print('Infor %s' % json.dumps(Images))
    print('FirstName %s' % lv_Firstname)
    print('SurName %s' % lv_Surname)
print('PutItem succeeded:')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dbresource = boto3.resource('dynamodb', region_name='eu-west-1')
rekclient = boto3.client('rekognition', 'eu-west-1')
collection_name = 'swiftarycelebrity'
ScannedFacestable = dbresource.Table('ScannedFaces')
response = rekclient.list_faces(CollectionId=collection_name)
Faces = response['Faces']
for Images in Faces:
lv_FaceId = Images['FaceId']
lv_ImageId = Images['ImageId']
    lv_ExternalImageId = Images['ExternalImageId']
    lv_Names = lv_ExternalImageId.split('_')
lv_Firstname = lv_Names[0]
lv_Surname = lv_Names[1]
print('FaceId %s' % lv_FaceId)
print('ImageId %s' % lv_ImageId)
print('ExternalImageId %s' % lv_ExternalImageId)
print('Infor %s' % json.dumps(Images))
    print('FirstName %s' % lv_Firstname)
    print('SurName %s' % lv_Surname)
print('PutItem succeeded:')
<|reserved_special_token_1|>
import json
import boto3
from botocore.exceptions import ClientError
import uuid
dbresource = boto3.resource('dynamodb', region_name='eu-west-1')
rekclient = boto3.client('rekognition', 'eu-west-1')
collection_name = 'swiftarycelebrity'
ScannedFacestable = dbresource.Table('ScannedFaces')
response = rekclient.list_faces(CollectionId=collection_name)
Faces = response['Faces']
for Images in Faces:
lv_FaceId = Images['FaceId']
lv_ImageId = Images['ImageId']
    lv_ExternalImageId = Images['ExternalImageId']
    lv_Names = lv_ExternalImageId.split('_')
lv_Firstname = lv_Names[0]
lv_Surname = lv_Names[1]
print('FaceId %s' % lv_FaceId)
print('ImageId %s' % lv_ImageId)
print('ExternalImageId %s' % lv_ExternalImageId)
print('Infor %s' % json.dumps(Images))
    print('FirstName %s' % lv_Firstname)
    print('SurName %s' % lv_Surname)
print('PutItem succeeded:')
<|reserved_special_token_1|>
# Import the SDK
import json
import boto3
from botocore.exceptions import ClientError
import uuid
#dbclient = boto3.client('dynamodb')
dbresource = boto3.resource('dynamodb', region_name='eu-west-1')
rekclient = boto3.client('rekognition','eu-west-1')
collection_name = 'swiftarycelebrity'
ScannedFacestable = dbresource.Table('ScannedFaces')
#
# List all images in the bucket
#
response = rekclient.list_faces( CollectionId=collection_name)
Faces =response ['Faces']
#print Faces
for Images in Faces:
lv_FaceId = Images ['FaceId']
lv_ImageId = Images ['ImageId']
    lv_ExternalImageId = Images ['ExternalImageId']
    lv_Names = lv_ExternalImageId.split("_")
lv_Firstname = lv_Names[0]
lv_Surname = lv_Names[1]
print ('FaceId %s' % lv_FaceId)
print ('ImageId %s' % lv_ImageId)
print ('ExternalImageId %s' % lv_ExternalImageId)
print ('Infor %s' %json.dumps(Images))
    print ('FirstName %s' % lv_Firstname )
    print ('SurName %s' % lv_Surname )
#response = ScannedFacestable.put_item(
# Item={
# 'FaceId' : lv_FaceId,
# 'ImageId' : lv_ImageId,
# 'ExternalImageId' : lv_ExternalImageId,
# 'Firstname' : lv_Firstname,
# 'Surname' : lv_Surname ,
# 'Info' : json.dumps(Images)
# }
#)
print("PutItem succeeded:")
#print(json.dumps(response, indent=4, cls=DecimalEncoder))
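# A minimal sketch of enabling the commented-out write above; it assumes the
# loop variables as fixed in this version and uses the already-imported
# ClientError to surface per-item DynamoDB failures.
try:
    response = ScannedFacestable.put_item(
        Item={
            'FaceId': lv_FaceId,
            'ImageId': lv_ImageId,
            'ExternalImageId': lv_ExternalImageId,
            'Firstname': lv_Firstname,
            'Surname': lv_Surname,
            'Info': json.dumps(Images)
        }
    )
except ClientError as e:
    print('PutItem failed: %s' % e.response['Error']['Message'])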
|
flexible
|
{
"blob_id": "6369c692e358c0dfd1193c6e961ecf9b521ea9ba",
"index": 4649,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor Images in Faces:\n lv_FaceId = Images['FaceId']\n lv_ImageId = Images['ImageId']\n lv_ExternalImageId = Images['ExternalImageId'],\n lv_Names = ExternalImageId.split('_')\n lv_Firstname = lv_Names[0]\n lv_Surname = lv_Names[1]\n print('FaceId %s' % lv_FaceId)\n print('ImageId %s' % lv_ImageId)\n print('ExternalImageId %s' % lv_ExternalImageId)\n print('Infor %s' % json.dumps(Images))\n print('FirstName %s' % lv_FirstName)\n print('SurName %s' % lv_SurName)\nprint('PutItem succeeded:')\n",
"step-3": "<mask token>\ndbresource = boto3.resource('dynamodb', region_name='eu-west-1')\nrekclient = boto3.client('rekognition', 'eu-west-1')\ncollection_name = 'swiftarycelebrity'\nScannedFacestable = dbresource.Table('ScannedFaces')\nresponse = rekclient.list_faces(CollectionId=collection_name)\nFaces = response['Faces']\nfor Images in Faces:\n lv_FaceId = Images['FaceId']\n lv_ImageId = Images['ImageId']\n lv_ExternalImageId = Images['ExternalImageId'],\n lv_Names = ExternalImageId.split('_')\n lv_Firstname = lv_Names[0]\n lv_Surname = lv_Names[1]\n print('FaceId %s' % lv_FaceId)\n print('ImageId %s' % lv_ImageId)\n print('ExternalImageId %s' % lv_ExternalImageId)\n print('Infor %s' % json.dumps(Images))\n print('FirstName %s' % lv_FirstName)\n print('SurName %s' % lv_SurName)\nprint('PutItem succeeded:')\n",
"step-4": "import json\nimport boto3\nfrom botocore.exceptions import ClientError\nimport uuid\ndbresource = boto3.resource('dynamodb', region_name='eu-west-1')\nrekclient = boto3.client('rekognition', 'eu-west-1')\ncollection_name = 'swiftarycelebrity'\nScannedFacestable = dbresource.Table('ScannedFaces')\nresponse = rekclient.list_faces(CollectionId=collection_name)\nFaces = response['Faces']\nfor Images in Faces:\n lv_FaceId = Images['FaceId']\n lv_ImageId = Images['ImageId']\n lv_ExternalImageId = Images['ExternalImageId'],\n lv_Names = ExternalImageId.split('_')\n lv_Firstname = lv_Names[0]\n lv_Surname = lv_Names[1]\n print('FaceId %s' % lv_FaceId)\n print('ImageId %s' % lv_ImageId)\n print('ExternalImageId %s' % lv_ExternalImageId)\n print('Infor %s' % json.dumps(Images))\n print('FirstName %s' % lv_FirstName)\n print('SurName %s' % lv_SurName)\nprint('PutItem succeeded:')\n",
"step-5": "# Import the SDK\nimport json\nimport boto3\nfrom botocore.exceptions import ClientError\nimport uuid\n#dbclient = boto3.client('dynamodb')\ndbresource = boto3.resource('dynamodb', region_name='eu-west-1')\n\nrekclient = boto3.client('rekognition','eu-west-1')\ncollection_name = 'swiftarycelebrity'\n\nScannedFacestable = dbresource.Table('ScannedFaces')\n\n#\n# List all images in the bucket\n#\n\n\nresponse = rekclient.list_faces( CollectionId=collection_name)\nFaces =response ['Faces']\n#print Faces\n\nfor Images in Faces:\n lv_FaceId = Images ['FaceId']\n lv_ImageId = Images ['ImageId']\n lv_ExternalImageId = Images ['ExternalImageId'],\n lv_Names = ExternalImageId.split(\"_\")\n lv_Firstname = lv_Names[0]\n lv_Surname = lv_Names[1]\n\n print ('FaceId %s' % lv_FaceId)\n print ('ImageId %s' % lv_ImageId)\n print ('ExternalImageId %s' % lv_ExternalImageId)\n print ('Infor %s' %json.dumps(Images)) \n print ('FirstName %s' % lv_FirstName )\n print ('SurName %s' % lv_SurName )\n\n\n #response = ScannedFacestable.put_item(\n # Item={\n # 'FaceId' : lv_FaceId,\n # 'ImageId' : lv_ImageId,\n # 'ExternalImageId' : lv_ExternalImageId,\n # 'Firstname' : lv_Firstname,\n # 'Surname' : lv_Surname ,\n # 'Info' : json.dumps(Images)\n # }\n #)\n\nprint(\"PutItem succeeded:\")\n#print(json.dumps(response, indent=4, cls=DecimalEncoder))\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from google.cloud import vision
from google.cloud.vision import types
from google.oauth2 import service_account
import os
# import re
import io
import pdf2image
import tempfile
import datetime
# Google API
credentials = service_account.Credentials.from_service_account_file("APIKey.json")
client = vision.ImageAnnotatorClient(credentials=credentials)
def OCRscan(self, imgfile):
print("Performing OCR Scan on the image ", imgfile)
with io.open(imgfile, "rb") as image_file:
content = image_file.read()
image = types.Image(content=content)
response_with_text = client.document_text_detection(image=image)
document = response_with_text.full_text_annotation
return document
def boxes_to_obj(self, bound):
    return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x,
            'y1': bound.vertices[0].y, 'y2': bound.vertices[2].y}
def generateTempFolder(self, prifx, src):
"Creating temp directory.."
print("Creating temp directory.. with src and prefix .. ", prifx, src)
# temp_dir = tempfile.mkdtemp(("-"+str(datetime.datetime.now()).replace(":", "-")), "PMR_Claims", self.cwd+os.sep
# + "GENERATED"+os.sep+"CLAIMS")
temp_dir = tempfile.mkdtemp(
("-"+str(datetime.datetime.now()).replace(":", "-")), prifx, src)
print("Temp directory created", temp_dir)
return temp_dir
def createSubDir(self, src, subDirNameList):
print("Creating a subdirectory..")
for subfolder_name in subDirNameList:
os.makedirs(os.path.join(src, subfolder_name))
def getFilesindir(self, dire):
print('Fetching the file in the directory')
print(dire)
return os.listdir(dire)
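# A hypothetical usage sketch, none of which is in the original: convert the
# first page of a PDF with the imported-but-unused pdf2image, then OCR it.
# 'claim.pdf' is an assumed input file; None is passed for self because the
# helpers above are written as unbound functions taking a self parameter.
pages = pdf2image.convert_from_path('claim.pdf', dpi=300)
pages[0].save('page0.png', 'PNG')
document = OCRscan(None, 'page0.png')
for page in document.pages:
    for block in page.blocks:
        print(boxes_to_obj(None, block.bounding_box))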
|
normal
|
{
"blob_id": "be69a9981fe6b53c3b9c4d2893913e4f9f7efb26",
"index": 6697,
"step-1": "<mask token>\n\n\ndef boxes_to_obj(self, bound):\n return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x, 'y1':\n bound.vertices[0].y, 'y2': bound.vertices[2].y}\n\n\ndef generateTempFolder(self, prifx, src):\n \"\"\"Creating temp directory..\"\"\"\n print('Creating temp directory.. with src and prefix .. ', prifx, src)\n temp_dir = tempfile.mkdtemp('-' + str(datetime.datetime.now()).replace(\n ':', '-'), prifx, src)\n print('Temp directory created', temp_dir)\n return temp_dir\n\n\ndef createSubDir(self, src, subDirNameList):\n print('Creating a subdirectory..')\n for subfolder_name in subDirNameList:\n os.makedirs(os.path.join(src, subfolder_name))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef OCRscan(self, imgfile):\n print('Performing OCR Scan on the image ', imgfile)\n with io.open(imgfile, 'rb') as image_file:\n content = image_file.read()\n image = types.Image(content=content)\n response_with_text = client.document_text_detection(image=image)\n document = response_with_text.full_text_annotation\n return document\n\n\ndef boxes_to_obj(self, bound):\n return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x, 'y1':\n bound.vertices[0].y, 'y2': bound.vertices[2].y}\n\n\ndef generateTempFolder(self, prifx, src):\n \"\"\"Creating temp directory..\"\"\"\n print('Creating temp directory.. with src and prefix .. ', prifx, src)\n temp_dir = tempfile.mkdtemp('-' + str(datetime.datetime.now()).replace(\n ':', '-'), prifx, src)\n print('Temp directory created', temp_dir)\n return temp_dir\n\n\ndef createSubDir(self, src, subDirNameList):\n print('Creating a subdirectory..')\n for subfolder_name in subDirNameList:\n os.makedirs(os.path.join(src, subfolder_name))\n\n\ndef getFilesindir(self, dire):\n print('Fetching the file in the directory')\n print(dire)\n return os.listdir(dire)\n",
"step-3": "<mask token>\ncredentials = service_account.Credentials.from_service_account_file(\n 'APIKey.json')\nclient = vision.ImageAnnotatorClient(credentials=credentials)\n\n\ndef OCRscan(self, imgfile):\n print('Performing OCR Scan on the image ', imgfile)\n with io.open(imgfile, 'rb') as image_file:\n content = image_file.read()\n image = types.Image(content=content)\n response_with_text = client.document_text_detection(image=image)\n document = response_with_text.full_text_annotation\n return document\n\n\ndef boxes_to_obj(self, bound):\n return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x, 'y1':\n bound.vertices[0].y, 'y2': bound.vertices[2].y}\n\n\ndef generateTempFolder(self, prifx, src):\n \"\"\"Creating temp directory..\"\"\"\n print('Creating temp directory.. with src and prefix .. ', prifx, src)\n temp_dir = tempfile.mkdtemp('-' + str(datetime.datetime.now()).replace(\n ':', '-'), prifx, src)\n print('Temp directory created', temp_dir)\n return temp_dir\n\n\ndef createSubDir(self, src, subDirNameList):\n print('Creating a subdirectory..')\n for subfolder_name in subDirNameList:\n os.makedirs(os.path.join(src, subfolder_name))\n\n\ndef getFilesindir(self, dire):\n print('Fetching the file in the directory')\n print(dire)\n return os.listdir(dire)\n",
"step-4": "from google.cloud import vision\nfrom google.cloud.vision import types\nfrom google.oauth2 import service_account\nimport os\nimport io\nimport pdf2image\nimport tempfile\nimport datetime\ncredentials = service_account.Credentials.from_service_account_file(\n 'APIKey.json')\nclient = vision.ImageAnnotatorClient(credentials=credentials)\n\n\ndef OCRscan(self, imgfile):\n print('Performing OCR Scan on the image ', imgfile)\n with io.open(imgfile, 'rb') as image_file:\n content = image_file.read()\n image = types.Image(content=content)\n response_with_text = client.document_text_detection(image=image)\n document = response_with_text.full_text_annotation\n return document\n\n\ndef boxes_to_obj(self, bound):\n return {'x1': bound.vertices[0].x, 'x2': bound.vertices[1].x, 'y1':\n bound.vertices[0].y, 'y2': bound.vertices[2].y}\n\n\ndef generateTempFolder(self, prifx, src):\n \"\"\"Creating temp directory..\"\"\"\n print('Creating temp directory.. with src and prefix .. ', prifx, src)\n temp_dir = tempfile.mkdtemp('-' + str(datetime.datetime.now()).replace(\n ':', '-'), prifx, src)\n print('Temp directory created', temp_dir)\n return temp_dir\n\n\ndef createSubDir(self, src, subDirNameList):\n print('Creating a subdirectory..')\n for subfolder_name in subDirNameList:\n os.makedirs(os.path.join(src, subfolder_name))\n\n\ndef getFilesindir(self, dire):\n print('Fetching the file in the directory')\n print(dire)\n return os.listdir(dire)\n",
"step-5": "from google.cloud import vision\nfrom google.cloud.vision import types\nfrom google.oauth2 import service_account\n\n\nimport os\n# import re\nimport io\n\nimport pdf2image\nimport tempfile\nimport datetime\n\n\n# Google API\ncredentials = service_account.Credentials.from_service_account_file(\"APIKey.json\")\nclient = vision.ImageAnnotatorClient(credentials=credentials)\n\n\ndef OCRscan(self, imgfile):\n\n print(\"Performing OCR Scan on the image \", imgfile)\n with io.open(imgfile, \"rb\") as image_file:\n content = image_file.read()\n\n image = types.Image(content=content)\n response_with_text = client.document_text_detection(image=image)\n document = response_with_text.full_text_annotation\n\n return document\n\n\ndef boxes_to_obj(self,bound):\n \n return {'x1': bound.vertices[0].x ,'x2':bound.vertices[1].x ,\n 'y1':bound.vertices[0].y ,'y2':bound.vertices[2].y }\n\n\ndef generateTempFolder(self, prifx, src):\n \"Creating temp directory..\"\n\n print(\"Creating temp directory.. with src and prefix .. \", prifx, src)\n # temp_dir = tempfile.mkdtemp((\"-\"+str(datetime.datetime.now()).replace(\":\", \"-\")), \"PMR_Claims\", self.cwd+os.sep\n # + \"GENERATED\"+os.sep+\"CLAIMS\")\n temp_dir = tempfile.mkdtemp(\n (\"-\"+str(datetime.datetime.now()).replace(\":\", \"-\")), prifx, src)\n\n print(\"Temp directory created\", temp_dir)\n\n return temp_dir\n\ndef createSubDir(self, src, subDirNameList):\n print(\"Creating a subdirectory..\")\n\n for subfolder_name in subDirNameList:\n os.makedirs(os.path.join(src, subfolder_name))\n\n\ndef getFilesindir(self, dire):\n print('Fetching the file in the directory')\n print(dire)\n return os.listdir(dire)\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class RedArrow(ReporterPlugin):
<|reserved_special_token_0|>
def start(self):
self.addMenuItem()
self.options = {'extremum_calculate_badness': False,
'extremum_ignore_badness_below': 0,
'smooth_connection_max_distance': 4,
'fractional_ignore_point_zero': True,
'collinear_vectors_max_distance': 2, 'test_closepath': False}
self.run_tests = ['test_extrema', 'test_fractional_coords',
'test_fractional_transform', 'test_smooth',
'test_empty_segments', 'test_collinear', 'test_semi_hv',
'test_zero_handles']
self.errors = []
self.show_labels = Glyphs.defaults['%s.showLabels' % plugin_id]
self.show_labels = not self.show_labels
self.toggleLabels()
<|reserved_special_token_0|>
def foreground(self, Layer):
try:
self._updateOutlineCheck(Layer)
except Exception as e:
self.logToConsole('drawForegroundForLayer_: %s' % str(e))
<|reserved_special_token_0|>
def selectGlyphsWithErrors(self):
"""
Selects all glyphs with errors in the active layer
"""
font = NSApplication.sharedApplication().font
if font is None:
return None
font.disableUpdateInterface()
mid = font.selectedFontMaster.id
selection = []
glyphlist = font.glyphs.keys()
for glyph_name in glyphlist:
glyph = font.glyphs[glyph_name]
layer = glyph.layers[mid]
if layer is not None:
outline_test_pen = OutlineTestPen(layer.parent.parent, self
.options, self.run_tests)
layer.draw(outline_test_pen)
if len(outline_test_pen.errors) > 0:
glyph.selected = True
selection.append(glyph_name)
else:
glyph.selected = False
font.enableUpdateInterface()
def _updateOutlineCheck(self, layer):
self.current_layer = layer
self.errors = []
if layer is not None:
outline_test_pen = OutlineTestPenGlyphs(layer.parent.parent,
self.options, self.run_tests)
layer.drawPoints(outline_test_pen)
self.errors = outline_test_pen.errors
if self.errors:
self._drawArrows()
def _drawArrow(self, position, kind, size, width):
x, y = position
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85
).set()
myPath = NSBezierPath.alloc().init()
myPath.setLineWidth_(width)
myPath.moveToPoint_((x, y - size))
myPath.lineToPoint_((x, y))
myPath.lineToPoint_((x + size, y))
myPath.moveToPoint_((x, y))
myPath.lineToPoint_((x + size, y - size))
myPath.stroke()
if self.show_labels:
myString = NSString.string().stringByAppendingString_(kind)
myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,
position[1] - 1.8 * size), {NSFontAttributeName: NSFont.
systemFontOfSize_(size), NSForegroundColorAttributeName:
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4,
0.6, 0.7)})
def _drawUnspecified(self, position, kind, size, width):
circle_size = size * 1.3
width *= 0.8
x, y = position
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85
).set()
myPath = NSBezierPath.alloc().init()
myPath.setLineWidth_(width)
myPath.appendBezierPathWithOvalInRect_(NSMakeRect(x - 0.5 *
circle_size, y - 0.5 * circle_size, circle_size, circle_size))
myPath.stroke()
if True:
myString = NSString.string().stringByAppendingString_(kind)
myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,
position[1] - 1.8 * size), {NSFontAttributeName: NSFont.
systemFontOfSize_(size), NSForegroundColorAttributeName:
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4,
0.6, 0.7)})
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RedArrow(ReporterPlugin):
<|reserved_special_token_0|>
def start(self):
self.addMenuItem()
self.options = {'extremum_calculate_badness': False,
'extremum_ignore_badness_below': 0,
'smooth_connection_max_distance': 4,
'fractional_ignore_point_zero': True,
'collinear_vectors_max_distance': 2, 'test_closepath': False}
self.run_tests = ['test_extrema', 'test_fractional_coords',
'test_fractional_transform', 'test_smooth',
'test_empty_segments', 'test_collinear', 'test_semi_hv',
'test_zero_handles']
self.errors = []
self.show_labels = Glyphs.defaults['%s.showLabels' % plugin_id]
self.show_labels = not self.show_labels
self.toggleLabels()
def addMenuItem(self):
mainMenu = NSApplication.sharedApplication().mainMenu()
s = objc.selector(self.selectGlyphsWithErrors, signature='v@:')
newMenuItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
Glyphs.localize({'en': u'Select Glyphs With Outline Errors',
'de': u'Glyphen mit Outlinefehlern auswählen'}), s, '')
newMenuItem.setTarget_(self)
mainMenu.itemAtIndex_(2).submenu().insertItem_atIndex_(newMenuItem, 11)
def foreground(self, Layer):
try:
self._updateOutlineCheck(Layer)
except Exception as e:
self.logToConsole('drawForegroundForLayer_: %s' % str(e))
def toggleLabels(self):
if self.show_labels:
self.show_labels = False
self.generalContextMenus = [{'name': Glyphs.localize({'en':
u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}
), 'action': self.toggleLabels}]
else:
self.show_labels = True
self.generalContextMenus = [{'name': Glyphs.localize({'en':
u'Hide Error Labels', 'de':
u'Fehlerbeschriftung ausblenden'}), 'action': self.
toggleLabels}]
Glyphs.defaults['%s.showLabels' % plugin_id] = self.show_labels
def selectGlyphsWithErrors(self):
"""
Selects all glyphs with errors in the active layer
"""
font = NSApplication.sharedApplication().font
if font is None:
return None
font.disableUpdateInterface()
mid = font.selectedFontMaster.id
selection = []
glyphlist = font.glyphs.keys()
for glyph_name in glyphlist:
glyph = font.glyphs[glyph_name]
layer = glyph.layers[mid]
if layer is not None:
outline_test_pen = OutlineTestPen(layer.parent.parent, self
.options, self.run_tests)
layer.draw(outline_test_pen)
if len(outline_test_pen.errors) > 0:
glyph.selected = True
selection.append(glyph_name)
else:
glyph.selected = False
font.enableUpdateInterface()
def _updateOutlineCheck(self, layer):
self.current_layer = layer
self.errors = []
if layer is not None:
outline_test_pen = OutlineTestPenGlyphs(layer.parent.parent,
self.options, self.run_tests)
layer.drawPoints(outline_test_pen)
self.errors = outline_test_pen.errors
if self.errors:
self._drawArrows()
def _drawArrow(self, position, kind, size, width):
x, y = position
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85
).set()
myPath = NSBezierPath.alloc().init()
myPath.setLineWidth_(width)
myPath.moveToPoint_((x, y - size))
myPath.lineToPoint_((x, y))
myPath.lineToPoint_((x + size, y))
myPath.moveToPoint_((x, y))
myPath.lineToPoint_((x + size, y - size))
myPath.stroke()
if self.show_labels:
myString = NSString.string().stringByAppendingString_(kind)
myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,
position[1] - 1.8 * size), {NSFontAttributeName: NSFont.
systemFontOfSize_(size), NSForegroundColorAttributeName:
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4,
0.6, 0.7)})
def _drawUnspecified(self, position, kind, size, width):
circle_size = size * 1.3
width *= 0.8
x, y = position
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85
).set()
myPath = NSBezierPath.alloc().init()
myPath.setLineWidth_(width)
myPath.appendBezierPathWithOvalInRect_(NSMakeRect(x - 0.5 *
circle_size, y - 0.5 * circle_size, circle_size, circle_size))
myPath.stroke()
if True:
myString = NSString.string().stringByAppendingString_(kind)
myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,
position[1] - 1.8 * size), {NSFontAttributeName: NSFont.
systemFontOfSize_(size), NSForegroundColorAttributeName:
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4,
0.6, 0.7)})
def _drawArrows(self, debug=False):
scale = self.getScale()
size = 10.0 / scale
width = 3.0 / scale
errors_by_position = {}
for e in self.errors:
if e.position is not None:
if (e.position[0], e.position[1]) in errors_by_position:
errors_by_position[e.position[0], e.position[1]].extend([e]
)
else:
errors_by_position[e.position[0], e.position[1]] = [e]
elif None in errors_by_position:
errors_by_position[None].extend([e])
else:
errors_by_position[None] = [e]
for pos, errors in errors_by_position.iteritems():
message = ''
for e in errors:
if e.badness is None or not debug:
message += '%s, ' % e.kind
else:
message += '%s (Severity %0.1f), ' % (e.kind, e.badness)
if pos is None:
pos = self.current_layer.width + 20, -10
self._drawUnspecified(pos, message.strip(', '), size, width)
else:
self._drawArrow(pos, message.strip(', '), size, width)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plugin_id = 'de.kutilek.RedArrow'
class RedArrow(ReporterPlugin):
def settings(self):
self.menuName = 'Red Arrows'
self.keyboardShortcut = 'a'
self.keyboardShortcutModifier = (NSCommandKeyMask | NSShiftKeyMask |
NSAlternateKeyMask)
self.generalContextMenus = [{'name': Glyphs.localize({'en':
u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}),
'action': self.toggleLabels}]
def start(self):
self.addMenuItem()
self.options = {'extremum_calculate_badness': False,
'extremum_ignore_badness_below': 0,
'smooth_connection_max_distance': 4,
'fractional_ignore_point_zero': True,
'collinear_vectors_max_distance': 2, 'test_closepath': False}
self.run_tests = ['test_extrema', 'test_fractional_coords',
'test_fractional_transform', 'test_smooth',
'test_empty_segments', 'test_collinear', 'test_semi_hv',
'test_zero_handles']
self.errors = []
self.show_labels = Glyphs.defaults['%s.showLabels' % plugin_id]
self.show_labels = not self.show_labels
self.toggleLabels()
def addMenuItem(self):
mainMenu = NSApplication.sharedApplication().mainMenu()
s = objc.selector(self.selectGlyphsWithErrors, signature='v@:')
newMenuItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
Glyphs.localize({'en': u'Select Glyphs With Outline Errors',
'de': u'Glyphen mit Outlinefehlern auswählen'}), s, '')
newMenuItem.setTarget_(self)
mainMenu.itemAtIndex_(2).submenu().insertItem_atIndex_(newMenuItem, 11)
def foreground(self, Layer):
try:
self._updateOutlineCheck(Layer)
except Exception as e:
self.logToConsole('drawForegroundForLayer_: %s' % str(e))
def toggleLabels(self):
if self.show_labels:
self.show_labels = False
self.generalContextMenus = [{'name': Glyphs.localize({'en':
u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}
), 'action': self.toggleLabels}]
else:
self.show_labels = True
self.generalContextMenus = [{'name': Glyphs.localize({'en':
u'Hide Error Labels', 'de':
u'Fehlerbeschriftung ausblenden'}), 'action': self.
toggleLabels}]
Glyphs.defaults['%s.showLabels' % plugin_id] = self.show_labels
def selectGlyphsWithErrors(self):
"""
Selects all glyphs with errors in the active layer
"""
font = NSApplication.sharedApplication().font
if font is None:
return None
font.disableUpdateInterface()
mid = font.selectedFontMaster.id
selection = []
glyphlist = font.glyphs.keys()
for glyph_name in glyphlist:
glyph = font.glyphs[glyph_name]
layer = glyph.layers[mid]
if layer is not None:
outline_test_pen = OutlineTestPen(layer.parent.parent, self
.options, self.run_tests)
layer.draw(outline_test_pen)
if len(outline_test_pen.errors) > 0:
glyph.selected = True
selection.append(glyph_name)
else:
glyph.selected = False
font.enableUpdateInterface()
def _updateOutlineCheck(self, layer):
self.current_layer = layer
self.errors = []
if layer is not None:
outline_test_pen = OutlineTestPenGlyphs(layer.parent.parent,
self.options, self.run_tests)
layer.drawPoints(outline_test_pen)
self.errors = outline_test_pen.errors
if self.errors:
self._drawArrows()
def _drawArrow(self, position, kind, size, width):
x, y = position
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85
).set()
myPath = NSBezierPath.alloc().init()
myPath.setLineWidth_(width)
myPath.moveToPoint_((x, y - size))
myPath.lineToPoint_((x, y))
myPath.lineToPoint_((x + size, y))
myPath.moveToPoint_((x, y))
myPath.lineToPoint_((x + size, y - size))
myPath.stroke()
if self.show_labels:
myString = NSString.string().stringByAppendingString_(kind)
myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,
position[1] - 1.8 * size), {NSFontAttributeName: NSFont.
systemFontOfSize_(size), NSForegroundColorAttributeName:
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4,
0.6, 0.7)})
def _drawUnspecified(self, position, kind, size, width):
circle_size = size * 1.3
width *= 0.8
x, y = position
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85
).set()
myPath = NSBezierPath.alloc().init()
myPath.setLineWidth_(width)
myPath.appendBezierPathWithOvalInRect_(NSMakeRect(x - 0.5 *
circle_size, y - 0.5 * circle_size, circle_size, circle_size))
myPath.stroke()
if True:
myString = NSString.string().stringByAppendingString_(kind)
myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,
position[1] - 1.8 * size), {NSFontAttributeName: NSFont.
systemFontOfSize_(size), NSForegroundColorAttributeName:
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4,
0.6, 0.7)})
def _drawArrows(self, debug=False):
scale = self.getScale()
size = 10.0 / scale
width = 3.0 / scale
errors_by_position = {}
for e in self.errors:
if e.position is not None:
if (e.position[0], e.position[1]) in errors_by_position:
errors_by_position[e.position[0], e.position[1]].extend([e]
)
else:
errors_by_position[e.position[0], e.position[1]] = [e]
elif None in errors_by_position:
errors_by_position[None].extend([e])
else:
errors_by_position[None] = [e]
for pos, errors in errors_by_position.iteritems():
message = ''
for e in errors:
if e.badness is None or not debug:
message += '%s, ' % e.kind
else:
message += '%s (Severity %0.1f), ' % (e.kind, e.badness)
if pos is None:
pos = self.current_layer.width + 20, -10
self._drawUnspecified(pos, message.strip(', '), size, width)
else:
self._drawArrow(pos, message.strip(', '), size, width)
<|reserved_special_token_1|>
from GlyphsApp.plugins import *
from outlineTestPenGlyphs import OutlineTestPenGlyphs
from string import strip
plugin_id = 'de.kutilek.RedArrow'
class RedArrow(ReporterPlugin):
def settings(self):
self.menuName = 'Red Arrows'
self.keyboardShortcut = 'a'
self.keyboardShortcutModifier = (NSCommandKeyMask | NSShiftKeyMask |
NSAlternateKeyMask)
self.generalContextMenus = [{'name': Glyphs.localize({'en':
u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}),
'action': self.toggleLabels}]
def start(self):
self.addMenuItem()
self.options = {'extremum_calculate_badness': False,
'extremum_ignore_badness_below': 0,
'smooth_connection_max_distance': 4,
'fractional_ignore_point_zero': True,
'collinear_vectors_max_distance': 2, 'test_closepath': False}
self.run_tests = ['test_extrema', 'test_fractional_coords',
'test_fractional_transform', 'test_smooth',
'test_empty_segments', 'test_collinear', 'test_semi_hv',
'test_zero_handles']
self.errors = []
self.show_labels = Glyphs.defaults['%s.showLabels' % plugin_id]
self.show_labels = not self.show_labels
self.toggleLabels()
def addMenuItem(self):
mainMenu = NSApplication.sharedApplication().mainMenu()
s = objc.selector(self.selectGlyphsWithErrors, signature='v@:')
newMenuItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
Glyphs.localize({'en': u'Select Glyphs With Outline Errors',
'de': u'Glyphen mit Outlinefehlern auswählen'}), s, '')
newMenuItem.setTarget_(self)
mainMenu.itemAtIndex_(2).submenu().insertItem_atIndex_(newMenuItem, 11)
def foreground(self, Layer):
try:
self._updateOutlineCheck(Layer)
except Exception as e:
self.logToConsole('drawForegroundForLayer_: %s' % str(e))
def toggleLabels(self):
if self.show_labels:
self.show_labels = False
self.generalContextMenus = [{'name': Glyphs.localize({'en':
u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}
), 'action': self.toggleLabels}]
else:
self.show_labels = True
self.generalContextMenus = [{'name': Glyphs.localize({'en':
u'Hide Error Labels', 'de':
u'Fehlerbeschriftung ausblenden'}), 'action': self.
toggleLabels}]
Glyphs.defaults['%s.showLabels' % plugin_id] = self.show_labels
def selectGlyphsWithErrors(self):
"""
Selects all glyphs with errors in the active layer
"""
font = NSApplication.sharedApplication().font
if font is None:
return None
font.disableUpdateInterface()
mid = font.selectedFontMaster.id
selection = []
glyphlist = font.glyphs.keys()
for glyph_name in glyphlist:
glyph = font.glyphs[glyph_name]
layer = glyph.layers[mid]
if layer is not None:
outline_test_pen = OutlineTestPen(layer.parent.parent, self
.options, self.run_tests)
layer.draw(outline_test_pen)
if len(outline_test_pen.errors) > 0:
glyph.selected = True
selection.append(glyph_name)
else:
glyph.selected = False
font.enableUpdateInterface()
def _updateOutlineCheck(self, layer):
self.current_layer = layer
self.errors = []
if layer is not None:
outline_test_pen = OutlineTestPenGlyphs(layer.parent.parent,
self.options, self.run_tests)
layer.drawPoints(outline_test_pen)
self.errors = outline_test_pen.errors
if self.errors:
self._drawArrows()
def _drawArrow(self, position, kind, size, width):
x, y = position
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85
).set()
myPath = NSBezierPath.alloc().init()
myPath.setLineWidth_(width)
myPath.moveToPoint_((x, y - size))
myPath.lineToPoint_((x, y))
myPath.lineToPoint_((x + size, y))
myPath.moveToPoint_((x, y))
myPath.lineToPoint_((x + size, y - size))
myPath.stroke()
if self.show_labels:
myString = NSString.string().stringByAppendingString_(kind)
myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,
position[1] - 1.8 * size), {NSFontAttributeName: NSFont.
systemFontOfSize_(size), NSForegroundColorAttributeName:
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4,
0.6, 0.7)})
def _drawUnspecified(self, position, kind, size, width):
circle_size = size * 1.3
width *= 0.8
x, y = position
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85
).set()
myPath = NSBezierPath.alloc().init()
myPath.setLineWidth_(width)
myPath.appendBezierPathWithOvalInRect_(NSMakeRect(x - 0.5 *
circle_size, y - 0.5 * circle_size, circle_size, circle_size))
myPath.stroke()
if True:
myString = NSString.string().stringByAppendingString_(kind)
myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,
position[1] - 1.8 * size), {NSFontAttributeName: NSFont.
systemFontOfSize_(size), NSForegroundColorAttributeName:
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4,
0.6, 0.7)})
def _drawArrows(self, debug=False):
scale = self.getScale()
size = 10.0 / scale
width = 3.0 / scale
errors_by_position = {}
for e in self.errors:
if e.position is not None:
if (e.position[0], e.position[1]) in errors_by_position:
errors_by_position[e.position[0], e.position[1]].extend([e]
)
else:
errors_by_position[e.position[0], e.position[1]] = [e]
elif None in errors_by_position:
errors_by_position[None].extend([e])
else:
errors_by_position[None] = [e]
for pos, errors in errors_by_position.iteritems():
message = ''
for e in errors:
if e.badness is None or not debug:
message += '%s, ' % e.kind
else:
message += '%s (Severity %0.1f), ' % (e.kind, e.badness)
if pos is None:
pos = self.current_layer.width + 20, -10
self._drawUnspecified(pos, message.strip(', '), size, width)
else:
self._drawArrow(pos, message.strip(', '), size, width)
<|reserved_special_token_1|>
# encoding: utf-8
from GlyphsApp.plugins import *
from outlineTestPenGlyphs import OutlineTestPenGlyphs
from string import strip
plugin_id = "de.kutilek.RedArrow"
class RedArrow(ReporterPlugin):
def settings(self):
self.menuName = "Red Arrows"
self.keyboardShortcut = 'a'
self.keyboardShortcutModifier = NSCommandKeyMask | NSShiftKeyMask | NSAlternateKeyMask
self.generalContextMenus = [
{"name": Glyphs.localize({'en': u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}), "action": self.toggleLabels},
]
def start(self):
self.addMenuItem()
self.options = {
"extremum_calculate_badness": False,
"extremum_ignore_badness_below": 0,
"smooth_connection_max_distance": 4,
"fractional_ignore_point_zero": True,
"collinear_vectors_max_distance": 2,
"test_closepath": False,
}
self.run_tests = [
"test_extrema",
"test_fractional_coords",
"test_fractional_transform",
"test_smooth",
"test_empty_segments",
"test_collinear",
"test_semi_hv",
#"test_closepath",
"test_zero_handles",
]
self.errors = []
self.show_labels = Glyphs.defaults["%s.showLabels" % plugin_id]
self.show_labels = not(self.show_labels)
self.toggleLabels()
def addMenuItem(self):
mainMenu = NSApplication.sharedApplication().mainMenu()
s = objc.selector(self.selectGlyphsWithErrors,signature='v@:')
newMenuItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
Glyphs.localize({
'en': u"Select Glyphs With Outline Errors",
'de': u'Glyphen mit Outlinefehlern auswählen'
}),
s,
""
)
newMenuItem.setTarget_(self)
mainMenu.itemAtIndex_(2).submenu().insertItem_atIndex_(newMenuItem, 11)
def foreground(self, Layer):
try:
self._updateOutlineCheck(Layer)
except Exception as e:
self.logToConsole( "drawForegroundForLayer_: %s" % str(e) )
def toggleLabels(self):
if self.show_labels:
self.show_labels = False
self.generalContextMenus = [
{
"name": Glyphs.localize(
{
'en': u'Show Error Labels',
'de': u'Fehlerbeschriftung anzeigen'
}
),
"action": self.toggleLabels
},
]
else:
self.show_labels = True
self.generalContextMenus = [
{
"name": Glyphs.localize(
{
'en': u'Hide Error Labels',
'de': u'Fehlerbeschriftung ausblenden'
}
),
"action": self.toggleLabels
},
]
Glyphs.defaults["%s.showLabels" % plugin_id] = self.show_labels
def selectGlyphsWithErrors(self):
"""
Selects all glyphs with errors in the active layer
"""
font = NSApplication.sharedApplication().font
if font is None:
return None
font.disableUpdateInterface()
mid = font.selectedFontMaster.id
selection = []
# pre-filter glyph list
#glyphlist = [glyph.name for glyph in font.glyphs if len(glyph.layers[mid].paths) > 0]
glyphlist = font.glyphs.keys()
for glyph_name in glyphlist:
glyph = font.glyphs[glyph_name]
layer = glyph.layers[mid]
if layer is not None:
#try:
outline_test_pen = OutlineTestPen(layer.parent.parent, self.options, self.run_tests)
layer.draw(outline_test_pen)
if len(outline_test_pen.errors) > 0:
glyph.selected = True
selection.append(glyph_name)
else:
glyph.selected = False
#except Exception as e:
# self.logToConsole( "selectGlyphsWithErrors: Layer '%s': %s" % (glyph_name, str(e)) )
font.enableUpdateInterface()
def _updateOutlineCheck(self, layer):
self.current_layer = layer
self.errors = []
if layer is not None:
outline_test_pen = OutlineTestPenGlyphs(layer.parent.parent, self.options, self.run_tests)
layer.drawPoints(outline_test_pen)
self.errors = outline_test_pen.errors
if self.errors:
self._drawArrows()
def _drawArrow(self, position, kind, size, width):
x, y = position
NSColor.colorWithCalibratedRed_green_blue_alpha_( 0.9, 0.1, 0.0, 0.85 ).set()
myPath = NSBezierPath.alloc().init()
myPath.setLineWidth_( width )
myPath.moveToPoint_( (x, y-size) )
myPath.lineToPoint_( (x, y) )
myPath.lineToPoint_( (x+size, y) )
myPath.moveToPoint_( (x, y) )
myPath.lineToPoint_( (x+size, y-size) )
myPath.stroke()
#mx, my = NSWindow.mouseLocationOutsideOfEventStream()
#NSLog("Mouse %f %f" % (mx, my))
#if NSMouseInRect((mx, my), NSMakeRect(x-size, y-size, size, size), False):
if self.show_labels:
myString = NSString.string().stringByAppendingString_(kind)
myString.drawAtPoint_withAttributes_(
(position[0] + 1.8 * size, position[1] - 1.8 * size),
{
NSFontAttributeName: NSFont.systemFontOfSize_(size),
NSForegroundColorAttributeName: NSColor.colorWithCalibratedRed_green_blue_alpha_( 0.4, 0.4, 0.6, 0.7 ),
}
)
def _drawUnspecified(self, position, kind, size, width):
circle_size = size * 1.3
width *= 0.8
x, y = position
NSColor.colorWithCalibratedRed_green_blue_alpha_( 0.9, 0.1, 0.0, 0.85 ).set()
myPath = NSBezierPath.alloc().init()
myPath.setLineWidth_( width )
myPath.appendBezierPathWithOvalInRect_( NSMakeRect( x - 0.5 * circle_size, y - 0.5 * circle_size, circle_size, circle_size ) )
myPath.stroke()
# FIXME
#mx, my = NSWindow.mouseLocationOutsideOfEventStream()
#NSLog("Mouse %f %f" % (mx, my))
#if NSMouseInRect((mx, my), NSMakeRect(x-size, y-size, size, size), False):
if True: # show labels
myString = NSString.string().stringByAppendingString_(kind)
myString.drawAtPoint_withAttributes_(
(position[0] + 1.8 * size, position[1] - 1.8 * size),
{
NSFontAttributeName: NSFont.systemFontOfSize_(size),
NSForegroundColorAttributeName: NSColor.colorWithCalibratedRed_green_blue_alpha_( 0.4, 0.4, 0.6, 0.7 ),
}
)
def _drawArrows(self, debug=False):
scale = self.getScale()
size = 10.0 / scale
width = 3.0 / scale
errors_by_position = {}
for e in self.errors:
if e.position is not None:
if (e.position[0], e.position[1]) in errors_by_position:
errors_by_position[(e.position[0], e.position[1])].extend([e])
else:
errors_by_position[(e.position[0], e.position[1])] = [e]
else:
if None in errors_by_position:
errors_by_position[None].extend([e])
else:
errors_by_position[None] = [e]
for pos, errors in errors_by_position.iteritems():
message = ""
for e in errors:
if e.badness is None or not debug:
message += "%s, " % (e.kind)
else:
message += "%s (Severity %0.1f), " % (e.kind, e.badness)
if pos is None:
#bb = self.current_layer.bounds
#pos = (bb.origin.x + 0.5 * bb.size.width, bb.origin.y + 0.5 * bb.size.height)
pos = (self.current_layer.width + 20, -10)
self._drawUnspecified(pos, message.strip(", "), size, width)
else:
self._drawArrow(pos, message.strip(", "), size, width)
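# Note: errors_by_position.iteritems() in _drawArrows above is Python-2-only,
# consistent with the plugin's era. A Python 3 port would iterate with
# .items() instead; the toy dict below only makes this snippet runnable.
errors_by_position = {(10, 20): ["extremum"], None: ["zero handle"]}
for pos, errors in errors_by_position.items():
    print(pos, errors)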
|
flexible
|
{
"blob_id": "229d7378695f7e00176eb7c3962519af3db1b7e1",
"index": 4461,
"step-1": "<mask token>\n\n\nclass RedArrow(ReporterPlugin):\n <mask token>\n\n def start(self):\n self.addMenuItem()\n self.options = {'extremum_calculate_badness': False,\n 'extremum_ignore_badness_below': 0,\n 'smooth_connection_max_distance': 4,\n 'fractional_ignore_point_zero': True,\n 'collinear_vectors_max_distance': 2, 'test_closepath': False}\n self.run_tests = ['test_extrema', 'test_fractional_coords',\n 'test_fractional_transform', 'test_smooth',\n 'test_empty_segments', 'test_collinear', 'test_semi_hv',\n 'test_zero_handles']\n self.errors = []\n self.show_labels = Glyphs.defaults['%s.showLabels' % plugin_id]\n self.show_labels = not self.show_labels\n self.toggleLabels()\n <mask token>\n\n def foreground(self, Layer):\n try:\n self._updateOutlineCheck(Layer)\n except Exception as e:\n self.logToConsole('drawForegroundForLayer_: %s' % str(e))\n <mask token>\n\n def selectGlyphsWithErrors(self):\n \"\"\"\n\t\tSelects all glyphs with errors in the active layer\n\t\t\"\"\"\n font = NSApplication.sharedApplication().font\n if font is None:\n return None\n font.disableUpdateInterface()\n mid = font.selectedFontMaster.id\n selection = []\n glyphlist = font.glyphs.keys()\n for glyph_name in glyphlist:\n glyph = font.glyphs[glyph_name]\n layer = glyph.layers[mid]\n if layer is not None:\n outline_test_pen = OutlineTestPen(layer.parent.parent, self\n .options, self.run_tests)\n layer.draw(outline_test_pen)\n if len(outline_test_pen.errors) > 0:\n glyph.selected = True\n selection.append(glyph_name)\n else:\n glyph.selected = False\n font.enableUpdateInterface()\n\n def _updateOutlineCheck(self, layer):\n self.current_layer = layer\n self.errors = []\n if layer is not None:\n outline_test_pen = OutlineTestPenGlyphs(layer.parent.parent,\n self.options, self.run_tests)\n layer.drawPoints(outline_test_pen)\n self.errors = outline_test_pen.errors\n if self.errors:\n self._drawArrows()\n\n def _drawArrow(self, position, kind, size, width):\n x, y = position\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85\n ).set()\n myPath = NSBezierPath.alloc().init()\n myPath.setLineWidth_(width)\n myPath.moveToPoint_((x, y - size))\n myPath.lineToPoint_((x, y))\n myPath.lineToPoint_((x + size, y))\n myPath.moveToPoint_((x, y))\n myPath.lineToPoint_((x + size, y - size))\n myPath.stroke()\n if self.show_labels:\n myString = NSString.string().stringByAppendingString_(kind)\n myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,\n position[1] - 1.8 * size), {NSFontAttributeName: NSFont.\n systemFontOfSize_(size), NSForegroundColorAttributeName:\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4, \n 0.6, 0.7)})\n\n def _drawUnspecified(self, position, kind, size, width):\n circle_size = size * 1.3\n width *= 0.8\n x, y = position\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85\n ).set()\n myPath = NSBezierPath.alloc().init()\n myPath.setLineWidth_(width)\n myPath.appendBezierPathWithOvalInRect_(NSMakeRect(x - 0.5 *\n circle_size, y - 0.5 * circle_size, circle_size, circle_size))\n myPath.stroke()\n if True:\n myString = NSString.string().stringByAppendingString_(kind)\n myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,\n position[1] - 1.8 * size), {NSFontAttributeName: NSFont.\n systemFontOfSize_(size), NSForegroundColorAttributeName:\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4, \n 0.6, 0.7)})\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass RedArrow(ReporterPlugin):\n <mask token>\n\n def start(self):\n self.addMenuItem()\n self.options = {'extremum_calculate_badness': False,\n 'extremum_ignore_badness_below': 0,\n 'smooth_connection_max_distance': 4,\n 'fractional_ignore_point_zero': True,\n 'collinear_vectors_max_distance': 2, 'test_closepath': False}\n self.run_tests = ['test_extrema', 'test_fractional_coords',\n 'test_fractional_transform', 'test_smooth',\n 'test_empty_segments', 'test_collinear', 'test_semi_hv',\n 'test_zero_handles']\n self.errors = []\n self.show_labels = Glyphs.defaults['%s.showLabels' % plugin_id]\n self.show_labels = not self.show_labels\n self.toggleLabels()\n\n def addMenuItem(self):\n mainMenu = NSApplication.sharedApplication().mainMenu()\n s = objc.selector(self.selectGlyphsWithErrors, signature='v@:')\n newMenuItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n Glyphs.localize({'en': u'Select Glyphs With Outline Errors',\n 'de': u'Glyphen mit Outlinefehlern auswählen'}), s, '')\n newMenuItem.setTarget_(self)\n mainMenu.itemAtIndex_(2).submenu().insertItem_atIndex_(newMenuItem, 11)\n\n def foreground(self, Layer):\n try:\n self._updateOutlineCheck(Layer)\n except Exception as e:\n self.logToConsole('drawForegroundForLayer_: %s' % str(e))\n\n def toggleLabels(self):\n if self.show_labels:\n self.show_labels = False\n self.generalContextMenus = [{'name': Glyphs.localize({'en':\n u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}\n ), 'action': self.toggleLabels}]\n else:\n self.show_labels = True\n self.generalContextMenus = [{'name': Glyphs.localize({'en':\n u'Hide Error Labels', 'de':\n u'Fehlerbeschriftung ausblenden'}), 'action': self.\n toggleLabels}]\n Glyphs.defaults['%s.showLabels' % plugin_id] = self.show_labels\n\n def selectGlyphsWithErrors(self):\n \"\"\"\n\t\tSelects all glyphs with errors in the active layer\n\t\t\"\"\"\n font = NSApplication.sharedApplication().font\n if font is None:\n return None\n font.disableUpdateInterface()\n mid = font.selectedFontMaster.id\n selection = []\n glyphlist = font.glyphs.keys()\n for glyph_name in glyphlist:\n glyph = font.glyphs[glyph_name]\n layer = glyph.layers[mid]\n if layer is not None:\n outline_test_pen = OutlineTestPen(layer.parent.parent, self\n .options, self.run_tests)\n layer.draw(outline_test_pen)\n if len(outline_test_pen.errors) > 0:\n glyph.selected = True\n selection.append(glyph_name)\n else:\n glyph.selected = False\n font.enableUpdateInterface()\n\n def _updateOutlineCheck(self, layer):\n self.current_layer = layer\n self.errors = []\n if layer is not None:\n outline_test_pen = OutlineTestPenGlyphs(layer.parent.parent,\n self.options, self.run_tests)\n layer.drawPoints(outline_test_pen)\n self.errors = outline_test_pen.errors\n if self.errors:\n self._drawArrows()\n\n def _drawArrow(self, position, kind, size, width):\n x, y = position\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85\n ).set()\n myPath = NSBezierPath.alloc().init()\n myPath.setLineWidth_(width)\n myPath.moveToPoint_((x, y - size))\n myPath.lineToPoint_((x, y))\n myPath.lineToPoint_((x + size, y))\n myPath.moveToPoint_((x, y))\n myPath.lineToPoint_((x + size, y - size))\n myPath.stroke()\n if self.show_labels:\n myString = NSString.string().stringByAppendingString_(kind)\n myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,\n position[1] - 1.8 * size), {NSFontAttributeName: NSFont.\n systemFontOfSize_(size), NSForegroundColorAttributeName:\n 
NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4, \n 0.6, 0.7)})\n\n def _drawUnspecified(self, position, kind, size, width):\n circle_size = size * 1.3\n width *= 0.8\n x, y = position\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85\n ).set()\n myPath = NSBezierPath.alloc().init()\n myPath.setLineWidth_(width)\n myPath.appendBezierPathWithOvalInRect_(NSMakeRect(x - 0.5 *\n circle_size, y - 0.5 * circle_size, circle_size, circle_size))\n myPath.stroke()\n if True:\n myString = NSString.string().stringByAppendingString_(kind)\n myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,\n position[1] - 1.8 * size), {NSFontAttributeName: NSFont.\n systemFontOfSize_(size), NSForegroundColorAttributeName:\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4, \n 0.6, 0.7)})\n\n def _drawArrows(self, debug=False):\n scale = self.getScale()\n size = 10.0 / scale\n width = 3.0 / scale\n errors_by_position = {}\n for e in self.errors:\n if e.position is not None:\n if (e.position[0], e.position[1]) in errors_by_position:\n errors_by_position[e.position[0], e.position[1]].extend([e]\n )\n else:\n errors_by_position[e.position[0], e.position[1]] = [e]\n elif None in errors_by_position:\n errors_by_position[None].extend([e])\n else:\n errors_by_position[None] = [e]\n for pos, errors in errors_by_position.iteritems():\n message = ''\n for e in errors:\n if e.badness is None or not debug:\n message += '%s, ' % e.kind\n else:\n message += '%s (Severity %0.1f), ' % (e.kind, e.badness)\n if pos is None:\n pos = self.current_layer.width + 20, -10\n self._drawUnspecified(pos, message.strip(', '), size, width)\n else:\n self._drawArrow(pos, message.strip(', '), size, width)\n",
"step-3": "<mask token>\nplugin_id = 'de.kutilek.RedArrow'\n\n\nclass RedArrow(ReporterPlugin):\n\n def settings(self):\n self.menuName = 'Red Arrows'\n self.keyboardShortcut = 'a'\n self.keyboardShortcutModifier = (NSCommandKeyMask | NSShiftKeyMask |\n NSAlternateKeyMask)\n self.generalContextMenus = [{'name': Glyphs.localize({'en':\n u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}),\n 'action': self.toggleLabels}]\n\n def start(self):\n self.addMenuItem()\n self.options = {'extremum_calculate_badness': False,\n 'extremum_ignore_badness_below': 0,\n 'smooth_connection_max_distance': 4,\n 'fractional_ignore_point_zero': True,\n 'collinear_vectors_max_distance': 2, 'test_closepath': False}\n self.run_tests = ['test_extrema', 'test_fractional_coords',\n 'test_fractional_transform', 'test_smooth',\n 'test_empty_segments', 'test_collinear', 'test_semi_hv',\n 'test_zero_handles']\n self.errors = []\n self.show_labels = Glyphs.defaults['%s.showLabels' % plugin_id]\n self.show_labels = not self.show_labels\n self.toggleLabels()\n\n def addMenuItem(self):\n mainMenu = NSApplication.sharedApplication().mainMenu()\n s = objc.selector(self.selectGlyphsWithErrors, signature='v@:')\n newMenuItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n Glyphs.localize({'en': u'Select Glyphs With Outline Errors',\n 'de': u'Glyphen mit Outlinefehlern auswählen'}), s, '')\n newMenuItem.setTarget_(self)\n mainMenu.itemAtIndex_(2).submenu().insertItem_atIndex_(newMenuItem, 11)\n\n def foreground(self, Layer):\n try:\n self._updateOutlineCheck(Layer)\n except Exception as e:\n self.logToConsole('drawForegroundForLayer_: %s' % str(e))\n\n def toggleLabels(self):\n if self.show_labels:\n self.show_labels = False\n self.generalContextMenus = [{'name': Glyphs.localize({'en':\n u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}\n ), 'action': self.toggleLabels}]\n else:\n self.show_labels = True\n self.generalContextMenus = [{'name': Glyphs.localize({'en':\n u'Hide Error Labels', 'de':\n u'Fehlerbeschriftung ausblenden'}), 'action': self.\n toggleLabels}]\n Glyphs.defaults['%s.showLabels' % plugin_id] = self.show_labels\n\n def selectGlyphsWithErrors(self):\n \"\"\"\n\t\tSelects all glyphs with errors in the active layer\n\t\t\"\"\"\n font = NSApplication.sharedApplication().font\n if font is None:\n return None\n font.disableUpdateInterface()\n mid = font.selectedFontMaster.id\n selection = []\n glyphlist = font.glyphs.keys()\n for glyph_name in glyphlist:\n glyph = font.glyphs[glyph_name]\n layer = glyph.layers[mid]\n if layer is not None:\n outline_test_pen = OutlineTestPen(layer.parent.parent, self\n .options, self.run_tests)\n layer.draw(outline_test_pen)\n if len(outline_test_pen.errors) > 0:\n glyph.selected = True\n selection.append(glyph_name)\n else:\n glyph.selected = False\n font.enableUpdateInterface()\n\n def _updateOutlineCheck(self, layer):\n self.current_layer = layer\n self.errors = []\n if layer is not None:\n outline_test_pen = OutlineTestPenGlyphs(layer.parent.parent,\n self.options, self.run_tests)\n layer.drawPoints(outline_test_pen)\n self.errors = outline_test_pen.errors\n if self.errors:\n self._drawArrows()\n\n def _drawArrow(self, position, kind, size, width):\n x, y = position\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85\n ).set()\n myPath = NSBezierPath.alloc().init()\n myPath.setLineWidth_(width)\n myPath.moveToPoint_((x, y - size))\n myPath.lineToPoint_((x, y))\n myPath.lineToPoint_((x + size, y))\n myPath.moveToPoint_((x, y))\n 
myPath.lineToPoint_((x + size, y - size))\n myPath.stroke()\n if self.show_labels:\n myString = NSString.string().stringByAppendingString_(kind)\n myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,\n position[1] - 1.8 * size), {NSFontAttributeName: NSFont.\n systemFontOfSize_(size), NSForegroundColorAttributeName:\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4, \n 0.6, 0.7)})\n\n def _drawUnspecified(self, position, kind, size, width):\n circle_size = size * 1.3\n width *= 0.8\n x, y = position\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85\n ).set()\n myPath = NSBezierPath.alloc().init()\n myPath.setLineWidth_(width)\n myPath.appendBezierPathWithOvalInRect_(NSMakeRect(x - 0.5 *\n circle_size, y - 0.5 * circle_size, circle_size, circle_size))\n myPath.stroke()\n if True:\n myString = NSString.string().stringByAppendingString_(kind)\n myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,\n position[1] - 1.8 * size), {NSFontAttributeName: NSFont.\n systemFontOfSize_(size), NSForegroundColorAttributeName:\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4, \n 0.6, 0.7)})\n\n def _drawArrows(self, debug=False):\n scale = self.getScale()\n size = 10.0 / scale\n width = 3.0 / scale\n errors_by_position = {}\n for e in self.errors:\n if e.position is not None:\n if (e.position[0], e.position[1]) in errors_by_position:\n errors_by_position[e.position[0], e.position[1]].extend([e]\n )\n else:\n errors_by_position[e.position[0], e.position[1]] = [e]\n elif None in errors_by_position:\n errors_by_position[None].extend([e])\n else:\n errors_by_position[None] = [e]\n for pos, errors in errors_by_position.iteritems():\n message = ''\n for e in errors:\n if e.badness is None or not debug:\n message += '%s, ' % e.kind\n else:\n message += '%s (Severity %0.1f), ' % (e.kind, e.badness)\n if pos is None:\n pos = self.current_layer.width + 20, -10\n self._drawUnspecified(pos, message.strip(', '), size, width)\n else:\n self._drawArrow(pos, message.strip(', '), size, width)\n",
"step-4": "from GlyphsApp.plugins import *\nfrom outlineTestPenGlyphs import OutlineTestPenGlyphs\nfrom string import strip\nplugin_id = 'de.kutilek.RedArrow'\n\n\nclass RedArrow(ReporterPlugin):\n\n def settings(self):\n self.menuName = 'Red Arrows'\n self.keyboardShortcut = 'a'\n self.keyboardShortcutModifier = (NSCommandKeyMask | NSShiftKeyMask |\n NSAlternateKeyMask)\n self.generalContextMenus = [{'name': Glyphs.localize({'en':\n u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}),\n 'action': self.toggleLabels}]\n\n def start(self):\n self.addMenuItem()\n self.options = {'extremum_calculate_badness': False,\n 'extremum_ignore_badness_below': 0,\n 'smooth_connection_max_distance': 4,\n 'fractional_ignore_point_zero': True,\n 'collinear_vectors_max_distance': 2, 'test_closepath': False}\n self.run_tests = ['test_extrema', 'test_fractional_coords',\n 'test_fractional_transform', 'test_smooth',\n 'test_empty_segments', 'test_collinear', 'test_semi_hv',\n 'test_zero_handles']\n self.errors = []\n self.show_labels = Glyphs.defaults['%s.showLabels' % plugin_id]\n self.show_labels = not self.show_labels\n self.toggleLabels()\n\n def addMenuItem(self):\n mainMenu = NSApplication.sharedApplication().mainMenu()\n s = objc.selector(self.selectGlyphsWithErrors, signature='v@:')\n newMenuItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n Glyphs.localize({'en': u'Select Glyphs With Outline Errors',\n 'de': u'Glyphen mit Outlinefehlern auswählen'}), s, '')\n newMenuItem.setTarget_(self)\n mainMenu.itemAtIndex_(2).submenu().insertItem_atIndex_(newMenuItem, 11)\n\n def foreground(self, Layer):\n try:\n self._updateOutlineCheck(Layer)\n except Exception as e:\n self.logToConsole('drawForegroundForLayer_: %s' % str(e))\n\n def toggleLabels(self):\n if self.show_labels:\n self.show_labels = False\n self.generalContextMenus = [{'name': Glyphs.localize({'en':\n u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}\n ), 'action': self.toggleLabels}]\n else:\n self.show_labels = True\n self.generalContextMenus = [{'name': Glyphs.localize({'en':\n u'Hide Error Labels', 'de':\n u'Fehlerbeschriftung ausblenden'}), 'action': self.\n toggleLabels}]\n Glyphs.defaults['%s.showLabels' % plugin_id] = self.show_labels\n\n def selectGlyphsWithErrors(self):\n \"\"\"\n\t\tSelects all glyphs with errors in the active layer\n\t\t\"\"\"\n font = NSApplication.sharedApplication().font\n if font is None:\n return None\n font.disableUpdateInterface()\n mid = font.selectedFontMaster.id\n selection = []\n glyphlist = font.glyphs.keys()\n for glyph_name in glyphlist:\n glyph = font.glyphs[glyph_name]\n layer = glyph.layers[mid]\n if layer is not None:\n outline_test_pen = OutlineTestPen(layer.parent.parent, self\n .options, self.run_tests)\n layer.draw(outline_test_pen)\n if len(outline_test_pen.errors) > 0:\n glyph.selected = True\n selection.append(glyph_name)\n else:\n glyph.selected = False\n font.enableUpdateInterface()\n\n def _updateOutlineCheck(self, layer):\n self.current_layer = layer\n self.errors = []\n if layer is not None:\n outline_test_pen = OutlineTestPenGlyphs(layer.parent.parent,\n self.options, self.run_tests)\n layer.drawPoints(outline_test_pen)\n self.errors = outline_test_pen.errors\n if self.errors:\n self._drawArrows()\n\n def _drawArrow(self, position, kind, size, width):\n x, y = position\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85\n ).set()\n myPath = NSBezierPath.alloc().init()\n myPath.setLineWidth_(width)\n myPath.moveToPoint_((x, y - size))\n 
myPath.lineToPoint_((x, y))\n myPath.lineToPoint_((x + size, y))\n myPath.moveToPoint_((x, y))\n myPath.lineToPoint_((x + size, y - size))\n myPath.stroke()\n if self.show_labels:\n myString = NSString.string().stringByAppendingString_(kind)\n myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,\n position[1] - 1.8 * size), {NSFontAttributeName: NSFont.\n systemFontOfSize_(size), NSForegroundColorAttributeName:\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4, \n 0.6, 0.7)})\n\n def _drawUnspecified(self, position, kind, size, width):\n circle_size = size * 1.3\n width *= 0.8\n x, y = position\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.9, 0.1, 0.0, 0.85\n ).set()\n myPath = NSBezierPath.alloc().init()\n myPath.setLineWidth_(width)\n myPath.appendBezierPathWithOvalInRect_(NSMakeRect(x - 0.5 *\n circle_size, y - 0.5 * circle_size, circle_size, circle_size))\n myPath.stroke()\n if True:\n myString = NSString.string().stringByAppendingString_(kind)\n myString.drawAtPoint_withAttributes_((position[0] + 1.8 * size,\n position[1] - 1.8 * size), {NSFontAttributeName: NSFont.\n systemFontOfSize_(size), NSForegroundColorAttributeName:\n NSColor.colorWithCalibratedRed_green_blue_alpha_(0.4, 0.4, \n 0.6, 0.7)})\n\n def _drawArrows(self, debug=False):\n scale = self.getScale()\n size = 10.0 / scale\n width = 3.0 / scale\n errors_by_position = {}\n for e in self.errors:\n if e.position is not None:\n if (e.position[0], e.position[1]) in errors_by_position:\n errors_by_position[e.position[0], e.position[1]].extend([e]\n )\n else:\n errors_by_position[e.position[0], e.position[1]] = [e]\n elif None in errors_by_position:\n errors_by_position[None].extend([e])\n else:\n errors_by_position[None] = [e]\n for pos, errors in errors_by_position.iteritems():\n message = ''\n for e in errors:\n if e.badness is None or not debug:\n message += '%s, ' % e.kind\n else:\n message += '%s (Severity %0.1f), ' % (e.kind, e.badness)\n if pos is None:\n pos = self.current_layer.width + 20, -10\n self._drawUnspecified(pos, message.strip(', '), size, width)\n else:\n self._drawArrow(pos, message.strip(', '), size, width)\n",
"step-5": "# encoding: utf-8\n\n\nfrom GlyphsApp.plugins import *\n\nfrom outlineTestPenGlyphs import OutlineTestPenGlyphs\nfrom string import strip\n\nplugin_id = \"de.kutilek.RedArrow\"\n\n\nclass RedArrow(ReporterPlugin):\n\t\n\tdef settings(self):\n\t\tself.menuName = \"Red Arrows\"\n\t\tself.keyboardShortcut = 'a'\n\t\tself.keyboardShortcutModifier = NSCommandKeyMask | NSShiftKeyMask | NSAlternateKeyMask\n\t\tself.generalContextMenus = [\n\t\t\t{\"name\": Glyphs.localize({'en': u'Show Error Labels', 'de': u'Fehlerbeschriftung anzeigen'}), \"action\": self.toggleLabels},\n\t\t]\n\t\n\tdef start(self):\n\t\tself.addMenuItem()\n\t\tself.options = {\n\t\t\t\"extremum_calculate_badness\": False,\n\t\t\t\"extremum_ignore_badness_below\": 0,\n\t\t\t\"smooth_connection_max_distance\": 4,\n\t\t\t\"fractional_ignore_point_zero\": True,\n\t\t\t\"collinear_vectors_max_distance\": 2,\n\t\t\t\"test_closepath\": False,\n\t\t}\n\t\tself.run_tests = [\n\t\t\t\"test_extrema\",\n\t\t\t\"test_fractional_coords\",\n\t\t\t\"test_fractional_transform\",\n\t\t\t\"test_smooth\",\n\t\t\t\"test_empty_segments\",\n\t\t\t\"test_collinear\",\n\t\t\t\"test_semi_hv\",\n\t\t\t#\"test_closepath\",\n\t\t\t\"test_zero_handles\",\n\t\t]\n\t\tself.errors = []\n\t\tself.show_labels = Glyphs.defaults[\"%s.showLabels\" % plugin_id]\n\t\tself.show_labels = not(self.show_labels)\n\t\tself.toggleLabels()\n\t\n\tdef addMenuItem(self):\n\t\tmainMenu = NSApplication.sharedApplication().mainMenu()\n\t\ts = objc.selector(self.selectGlyphsWithErrors,signature='v@:')\n\t\tnewMenuItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n\t\t\tGlyphs.localize({\n\t\t\t\t'en': u\"Select Glyphs With Outline Errors\",\n\t\t\t\t'de': u'Glyphen mit Outlinefehlern auswählen'\n\t\t\t}),\n\t\t\ts,\n\t\t\t\"\"\n\t\t)\n\t\tnewMenuItem.setTarget_(self)\n\t\tmainMenu.itemAtIndex_(2).submenu().insertItem_atIndex_(newMenuItem, 11)\n\t\n\tdef foreground(self, Layer):\n\t\ttry:\n\t\t\tself._updateOutlineCheck(Layer)\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"drawForegroundForLayer_: %s\" % str(e) )\n\t\n\tdef toggleLabels(self):\n\t\tif self.show_labels:\n\t\t\tself.show_labels = False\n\t\t\tself.generalContextMenus = [\n\t\t\t\t{\n\t\t\t\t\t\"name\": Glyphs.localize(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t'en': u'Show Error Labels',\n\t\t\t\t\t\t\t'de': u'Fehlerbeschriftung anzeigen'\n\t\t\t\t\t\t}\n\t\t\t\t\t),\n\t\t\t\t\t\"action\": self.toggleLabels\n\t\t\t\t},\n\t\t\t]\n\t\telse:\n\t\t\tself.show_labels = True\n\t\t\tself.generalContextMenus = [\n\t\t\t\t{\n\t\t\t\t\t\"name\": Glyphs.localize(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t'en': u'Hide Error Labels',\n\t\t\t\t\t\t\t'de': u'Fehlerbeschriftung ausblenden'\n\t\t\t\t\t\t}\n\t\t\t\t\t),\n\t\t\t\t\t\"action\": self.toggleLabels\n\t\t\t\t},\n\t\t\t]\n\t\tGlyphs.defaults[\"%s.showLabels\" % plugin_id] = self.show_labels\n\t\n\tdef selectGlyphsWithErrors(self):\n\t\t\"\"\"\n\t\tSelects all glyphs with errors in the active layer\n\t\t\"\"\"\n\t\tfont = NSApplication.sharedApplication().font\n\t\tif font is None:\n\t\t\treturn None\n\t\tfont.disableUpdateInterface()\n\t\tmid = font.selectedFontMaster.id\n\t\tselection = []\n\t\t# pre-filter glyph list\n\t\t#glyphlist = [glyph.name for glyph in font.glyphs if len(glyph.layers[mid].paths) > 0]\n\t\tglyphlist = font.glyphs.keys()\n\t\tfor glyph_name in glyphlist:\n\t\t\tglyph = font.glyphs[glyph_name]\n\t\t\tlayer = glyph.layers[mid]\n\t\t\tif layer is not None:\n\t\t\t\t#try:\n\t\t\t\toutline_test_pen = OutlineTestPen(layer.parent.parent, 
self.options, self.run_tests)\n\t\t\t\tlayer.draw(outline_test_pen)\n\t\t\t\tif len(outline_test_pen.errors) > 0:\n\t\t\t\t\tglyph.selected = True\n\t\t\t\t\tselection.append(glyph_name)\n\t\t\t\telse:\n\t\t\t\t\tglyph.selected = False\n\t\t\t\t#except Exception as e:\n\t\t\t\t#\tself.logToConsole( \"selectGlyphsWithErrors: Layer '%s': %s\" % (glyph_name, str(e)) )\n\t\tfont.enableUpdateInterface()\n\t\t\n\t\n\tdef _updateOutlineCheck(self, layer):\n\t\tself.current_layer = layer\n\t\tself.errors = []\n\t\tif layer is not None:\n\t\t\toutline_test_pen = OutlineTestPenGlyphs(layer.parent.parent, self.options, self.run_tests)\n\t\t\tlayer.drawPoints(outline_test_pen)\n\t\t\tself.errors = outline_test_pen.errors\n\t\t\tif self.errors:\n\t\t\t\tself._drawArrows()\n\t\n\tdef _drawArrow(self, position, kind, size, width):\n\t\tx, y = position\n\t\tNSColor.colorWithCalibratedRed_green_blue_alpha_( 0.9, 0.1, 0.0, 0.85 ).set()\n\t\tmyPath = NSBezierPath.alloc().init()\n\t\tmyPath.setLineWidth_( width )\n\t\tmyPath.moveToPoint_( (x, y-size) )\n\t\tmyPath.lineToPoint_( (x, y) )\n\t\tmyPath.lineToPoint_( (x+size, y) )\n\t\tmyPath.moveToPoint_( (x, y) )\n\t\tmyPath.lineToPoint_( (x+size, y-size) )\n\t\tmyPath.stroke()\n\t\t#mx, my = NSWindow.mouseLocationOutsideOfEventStream()\n\t\t#NSLog(\"Mouse %f %f\" % (mx, my))\n\t\t#if NSMouseInRect((mx, my), NSMakeRect(x-size, y-size, size, size), False):\n\t\tif self.show_labels:\n\t\t\tmyString = NSString.string().stringByAppendingString_(kind)\n\t\t\tmyString.drawAtPoint_withAttributes_(\n\t\t\t\t(position[0] + 1.8 * size, position[1] - 1.8 * size),\n\t\t\t\t{\n\t\t\t\t\tNSFontAttributeName: NSFont.systemFontOfSize_(size),\n\t\t\t\t\tNSForegroundColorAttributeName: NSColor.colorWithCalibratedRed_green_blue_alpha_( 0.4, 0.4, 0.6, 0.7 ),\n\t\t\t\t}\n\t\t\t)\n\t\n\tdef _drawUnspecified(self, position, kind, size, width):\n\t\tcircle_size = size * 1.3\n\t\twidth *= 0.8\n\t\tx, y = position\n\t\tNSColor.colorWithCalibratedRed_green_blue_alpha_( 0.9, 0.1, 0.0, 0.85 ).set()\n\t\tmyPath = NSBezierPath.alloc().init()\n\t\tmyPath.setLineWidth_( width )\n\t\tmyPath.appendBezierPathWithOvalInRect_( NSMakeRect( x - 0.5 * circle_size, y - 0.5 * circle_size, circle_size, circle_size ) )\n\t\tmyPath.stroke()\n\t\t# FIXME\n\t\t#mx, my = NSWindow.mouseLocationOutsideOfEventStream()\n\t\t#NSLog(\"Mouse %f %f\" % (mx, my))\n\t\t#if NSMouseInRect((mx, my), NSMakeRect(x-size, y-size, size, size), False):\n\t\tif True: # show labels\n\t\t\tmyString = NSString.string().stringByAppendingString_(kind)\n\t\t\tmyString.drawAtPoint_withAttributes_(\n\t\t\t\t(position[0] + 1.8 * size, position[1] - 1.8 * size),\n\t\t\t\t{\n\t\t\t\t\tNSFontAttributeName: NSFont.systemFontOfSize_(size),\n\t\t\t\t\tNSForegroundColorAttributeName: NSColor.colorWithCalibratedRed_green_blue_alpha_( 0.4, 0.4, 0.6, 0.7 ),\n\t\t\t\t}\n\t\t\t)\n\t\n\tdef _drawArrows(self, debug=False):\n\t\tscale = self.getScale()\n\t\tsize = 10.0 / scale\n\t\twidth = 3.0 / scale\n\t\terrors_by_position = {}\n\t\tfor e in self.errors:\n\t\t\tif e.position is not None:\n\t\t\t\tif (e.position[0], e.position[1]) in errors_by_position:\n\t\t\t\t\terrors_by_position[(e.position[0], e.position[1])].extend([e])\n\t\t\t\telse:\n\t\t\t\t\terrors_by_position[(e.position[0], e.position[1])] = [e]\n\t\t\telse:\n\t\t\t\tif None in errors_by_position:\n\t\t\t\t\terrors_by_position[None].extend([e])\n\t\t\t\telse:\n\t\t\t\t\terrors_by_position[None] = [e]\n\t\tfor pos, errors in errors_by_position.iteritems():\n\t\t\tmessage = \"\"\n\t\t\tfor e 
in errors:\n\t\t\t\tif e.badness is None or not debug:\n\t\t\t\t\tmessage += \"%s, \" % (e.kind)\n\t\t\t\telse:\n\t\t\t\t\tmessage += \"%s (Severity %0.1f), \" % (e.kind, e.badness)\n\t\t\tif pos is None:\n\t\t\t\t#bb = self.current_layer.bounds\n\t\t\t\t#pos = (bb.origin.x + 0.5 * bb.size.width, bb.origin.y + 0.5 * bb.size.height)\n\t\t\t\tpos = (self.current_layer.width + 20, -10)\n\t\t\t\tself._drawUnspecified(pos, message.strip(\", \"), size, width)\n\t\t\telse:\n\t\t\t\tself._drawArrow(pos, message.strip(\", \"), size, width)\n",
"step-ids": [
7,
10,
12,
13,
14
]
}
|
[
7,
10,
12,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
browser.get(website)
<|reserved_special_token_0|>
find_link.click()
<|reserved_special_token_0|>
input_first_name.send_keys('Timur')
<|reserved_special_token_0|>
input_last_name.send_keys('Atabaev')
<|reserved_special_token_0|>
input_city.send_keys('Tashkent')
<|reserved_special_token_0|>
input_country.send_keys('Uzbekistan')
<|reserved_special_token_0|>
button.click()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
browser = webdriver.Chrome()
website = 'http://suninjuly.github.io/find_link_text'
link_text = str(math.ceil(math.pow(math.pi, math.e) * 10000))
browser.get(website)
find_link = browser.find_element_by_link_text(link_text)
find_link.click()
input_first_name = browser.find_element_by_tag_name('input')
input_first_name.send_keys('Timur')
input_last_name = browser.find_element_by_name('last_name')
input_last_name.send_keys('Atabaev')
input_city = browser.find_element_by_class_name('city')
input_city.send_keys('Tashkent')
input_country = browser.find_element_by_id('country')
input_country.send_keys('Uzbekistan')
button = browser.find_element_by_css_selector('button.btn')
button.click()
<|reserved_special_token_1|>
from selenium import webdriver
import math
import time
browser = webdriver.Chrome()
website = 'http://suninjuly.github.io/find_link_text'
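# the target link's text is ceil(pi**e * 10000); compute the same value to locate it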
link_text = str(math.ceil(math.pow(math.pi, math.e) * 10000))
browser.get(website)
find_link = browser.find_element_by_link_text(link_text)
find_link.click()
input_first_name = browser.find_element_by_tag_name('input')
input_first_name.send_keys('Timur')
input_last_name = browser.find_element_by_name('last_name')
input_last_name.send_keys('Atabaev')
input_city = browser.find_element_by_class_name('city')
input_city.send_keys('Tashkent')
input_country = browser.find_element_by_id('country')
input_country.send_keys('Uzbekistan')
button = browser.find_element_by_css_selector('button.btn')
button.click()
|
flexible
|
{
"blob_id": "aa17e22bc13436333b1db4aee41eeced373119a8",
"index": 5704,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nbrowser.get(website)\n<mask token>\nfind_link.click()\n<mask token>\ninput_first_name.send_keys('Timur')\n<mask token>\ninput_last_name.send_keys('Atabaev')\n<mask token>\ninput_city.send_keys('Tashkent')\n<mask token>\ninput_country.send_keys('Uzbekistan')\n<mask token>\nbutton.click()\n",
"step-3": "<mask token>\nbrowser = webdriver.Chrome()\nwebsite = 'http://suninjuly.github.io/find_link_text'\nlink_text = str(math.ceil(math.pow(math.pi, math.e) * 10000))\nbrowser.get(website)\nfind_link = browser.find_element_by_link_text(link_text)\nfind_link.click()\ninput_first_name = browser.find_element_by_tag_name('input')\ninput_first_name.send_keys('Timur')\ninput_last_name = browser.find_element_by_name('last_name')\ninput_last_name.send_keys('Atabaev')\ninput_city = browser.find_element_by_class_name('city')\ninput_city.send_keys('Tashkent')\ninput_country = browser.find_element_by_id('country')\ninput_country.send_keys('Uzbekistan')\nbutton = browser.find_element_by_css_selector('button.btn')\nbutton.click()\n",
"step-4": "from selenium import webdriver\nimport math\nimport time\nbrowser = webdriver.Chrome()\nwebsite = 'http://suninjuly.github.io/find_link_text'\nlink_text = str(math.ceil(math.pow(math.pi, math.e) * 10000))\nbrowser.get(website)\nfind_link = browser.find_element_by_link_text(link_text)\nfind_link.click()\ninput_first_name = browser.find_element_by_tag_name('input')\ninput_first_name.send_keys('Timur')\ninput_last_name = browser.find_element_by_name('last_name')\ninput_last_name.send_keys('Atabaev')\ninput_city = browser.find_element_by_class_name('city')\ninput_city.send_keys('Tashkent')\ninput_country = browser.find_element_by_id('country')\ninput_country.send_keys('Uzbekistan')\nbutton = browser.find_element_by_css_selector('button.btn')\nbutton.click()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('RMS', '0001_initial')]
operations = [migrations.RenameField(model_name='inventorytable',
old_name='Restaurant_ID', new_name='Restaurant'), migrations.
RenameField(model_name='menuitemstable', old_name='Restaurant_ID',
new_name='Restaurant'), migrations.RenameField(model_name=
'reciperequirementstable', old_name='Ingredient_ID', new_name=
'Ingredient'), migrations.RenameField(model_name=
'reciperequirementstable', old_name='Item_ID', new_name='Item'),
migrations.RenameField(model_name='reciperequirementstable',
old_name='Restaurant_ID', new_name='Restaurant'), migrations.
RenameField(model_name='seatmanagementtable', old_name=
'Restaurant_ID', new_name='Restaurant')]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('RMS', '0001_initial')]
operations = [migrations.RenameField(model_name='inventorytable',
old_name='Restaurant_ID', new_name='Restaurant'), migrations.
RenameField(model_name='menuitemstable', old_name='Restaurant_ID',
new_name='Restaurant'), migrations.RenameField(model_name=
'reciperequirementstable', old_name='Ingredient_ID', new_name=
'Ingredient'), migrations.RenameField(model_name=
'reciperequirementstable', old_name='Item_ID', new_name='Item'),
migrations.RenameField(model_name='reciperequirementstable',
old_name='Restaurant_ID', new_name='Restaurant'), migrations.
RenameField(model_name='seatmanagementtable', old_name=
'Restaurant_ID', new_name='Restaurant')]
<|reserved_special_token_1|>
# Generated by Django 3.1.6 on 2021-02-27 23:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('RMS', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='inventorytable',
old_name='Restaurant_ID',
new_name='Restaurant',
),
migrations.RenameField(
model_name='menuitemstable',
old_name='Restaurant_ID',
new_name='Restaurant',
),
migrations.RenameField(
model_name='reciperequirementstable',
old_name='Ingredient_ID',
new_name='Ingredient',
),
migrations.RenameField(
model_name='reciperequirementstable',
old_name='Item_ID',
new_name='Item',
),
migrations.RenameField(
model_name='reciperequirementstable',
old_name='Restaurant_ID',
new_name='Restaurant',
),
migrations.RenameField(
model_name='seatmanagementtable',
old_name='Restaurant_ID',
new_name='Restaurant',
),
]
|
flexible
|
{
"blob_id": "ba336094d38a47457198919ce60969144a8fdedb",
"index": 5374,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('RMS', '0001_initial')]\n operations = [migrations.RenameField(model_name='inventorytable',\n old_name='Restaurant_ID', new_name='Restaurant'), migrations.\n RenameField(model_name='menuitemstable', old_name='Restaurant_ID',\n new_name='Restaurant'), migrations.RenameField(model_name=\n 'reciperequirementstable', old_name='Ingredient_ID', new_name=\n 'Ingredient'), migrations.RenameField(model_name=\n 'reciperequirementstable', old_name='Item_ID', new_name='Item'),\n migrations.RenameField(model_name='reciperequirementstable',\n old_name='Restaurant_ID', new_name='Restaurant'), migrations.\n RenameField(model_name='seatmanagementtable', old_name=\n 'Restaurant_ID', new_name='Restaurant')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('RMS', '0001_initial')]\n operations = [migrations.RenameField(model_name='inventorytable',\n old_name='Restaurant_ID', new_name='Restaurant'), migrations.\n RenameField(model_name='menuitemstable', old_name='Restaurant_ID',\n new_name='Restaurant'), migrations.RenameField(model_name=\n 'reciperequirementstable', old_name='Ingredient_ID', new_name=\n 'Ingredient'), migrations.RenameField(model_name=\n 'reciperequirementstable', old_name='Item_ID', new_name='Item'),\n migrations.RenameField(model_name='reciperequirementstable',\n old_name='Restaurant_ID', new_name='Restaurant'), migrations.\n RenameField(model_name='seatmanagementtable', old_name=\n 'Restaurant_ID', new_name='Restaurant')]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-02-27 23:29\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('RMS', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='inventorytable',\n old_name='Restaurant_ID',\n new_name='Restaurant',\n ),\n migrations.RenameField(\n model_name='menuitemstable',\n old_name='Restaurant_ID',\n new_name='Restaurant',\n ),\n migrations.RenameField(\n model_name='reciperequirementstable',\n old_name='Ingredient_ID',\n new_name='Ingredient',\n ),\n migrations.RenameField(\n model_name='reciperequirementstable',\n old_name='Item_ID',\n new_name='Item',\n ),\n migrations.RenameField(\n model_name='reciperequirementstable',\n old_name='Restaurant_ID',\n new_name='Restaurant',\n ),\n migrations.RenameField(\n model_name='seatmanagementtable',\n old_name='Restaurant_ID',\n new_name='Restaurant',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
from . import models
admin.site.register(models.Comentario)
# Register your models here.
|
normal
|
{
"blob_id": "d7d94cfed0b819297069c3434c70359a327403cd",
"index": 718,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(models.Comentario)\n",
"step-3": "from django.contrib import admin\nfrom . import models\nadmin.site.register(models.Comentario)\n",
"step-4": "from django.contrib import admin\nfrom . import models\n\nadmin.site.register(models.Comentario)\n\n# Register your models here.\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 2.2.3 on 2019-07-11 22:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app1', '0002_property_details'),
]
operations = [
migrations.AlterField(
model_name='property_details',
name='flat_type',
field=models.CharField(choices=[('1', '1BHK'), ('2', '2BHK'), ('3', '3BHK')], max_length=20),
),
migrations.AlterField(
model_name='property_details',
name='possession',
field=models.CharField(choices=[('1', 'ready to move'), ('2', 'work on progress')], max_length=20),
),
migrations.AlterField(
model_name='property_details',
name='price_range',
field=models.CharField(choices=[('1', '$5000'), ('2', '$15,000'), ('3', '$25,000'), ('4', '$40,000'), ('5', '$50,000')], max_length=50),
),
]
|
normal
|
{
"blob_id": "8cdd7646dbf23259e160186f332b5cb02b67291b",
"index": 5121,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app1', '0002_property_details')]\n operations = [migrations.AlterField(model_name='property_details', name\n ='flat_type', field=models.CharField(choices=[('1', '1BHK'), ('2',\n '2BHK'), ('3', '3BHK')], max_length=20)), migrations.AlterField(\n model_name='property_details', name='possession', field=models.\n CharField(choices=[('1', 'ready to move'), ('2', 'work on progress'\n )], max_length=20)), migrations.AlterField(model_name=\n 'property_details', name='price_range', field=models.CharField(\n choices=[('1', '$5000'), ('2', '$15,000'), ('3', '$25,000'), ('4',\n '$40,000'), ('5', '$50,000')], max_length=50))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app1', '0002_property_details')]\n operations = [migrations.AlterField(model_name='property_details', name\n ='flat_type', field=models.CharField(choices=[('1', '1BHK'), ('2',\n '2BHK'), ('3', '3BHK')], max_length=20)), migrations.AlterField(\n model_name='property_details', name='possession', field=models.\n CharField(choices=[('1', 'ready to move'), ('2', 'work on progress'\n )], max_length=20)), migrations.AlterField(model_name=\n 'property_details', name='price_range', field=models.CharField(\n choices=[('1', '$5000'), ('2', '$15,000'), ('3', '$25,000'), ('4',\n '$40,000'), ('5', '$50,000')], max_length=50))]\n",
"step-5": "# Generated by Django 2.2.3 on 2019-07-11 22:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app1', '0002_property_details'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='property_details',\n name='flat_type',\n field=models.CharField(choices=[('1', '1BHK'), ('2', '2BHK'), ('3', '3BHK')], max_length=20),\n ),\n migrations.AlterField(\n model_name='property_details',\n name='possession',\n field=models.CharField(choices=[('1', 'ready to move'), ('2', 'work on progress')], max_length=20),\n ),\n migrations.AlterField(\n model_name='property_details',\n name='price_range',\n field=models.CharField(choices=[('1', '$5000'), ('2', '$15,000'), ('3', '$25,000'), ('4', '$40,000'), ('5', '$50,000')], max_length=50),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('README.md', 'r') as f:
long_description = f.read()
setuptools.setup(name='unitreport', version='0.1.1', author='annahadji',
author_email='[email protected]', description=
'A small unittest-based tool for generating single page html reports in Python.'
, long_description=long_description, long_description_content_type=
'text/markdown', keywords=
'static unittest report generator Markdown plots tables', url=
'https://github.com/annahadji/unitreport', packages=['unitreport'],
package_data={'unitreport': ['templates/**']}, classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License', 'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Visualization'], python_requires=
'>=3.6', install_requires=['jinja2', 'markdown', 'matplotlib'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import setuptools
with open('README.md', 'r') as f:
long_description = f.read()
setuptools.setup(name='unitreport', version='0.1.1', author='annahadji',
author_email='[email protected]', description=
'A small unittest-based tool for generating single page html reports in Python.'
, long_description=long_description, long_description_content_type=
'text/markdown', keywords=
'static unittest report generator Markdown plots tables', url=
'https://github.com/annahadji/unitreport', packages=['unitreport'],
package_data={'unitreport': ['templates/**']}, classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License', 'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Visualization'], python_requires=
'>=3.6', install_requires=['jinja2', 'markdown', 'matplotlib'])
<|reserved_special_token_1|>
"""Distribution script for unitreport."""
import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="unitreport",
version="0.1.1",
author="annahadji",
author_email="[email protected]",
description="A small unittest-based tool for generating single page html reports in Python.",
long_description=long_description,
long_description_content_type="text/markdown",
keywords="static unittest report generator Markdown plots tables",
url="https://github.com/annahadji/unitreport",
packages=["unitreport"],
package_data={"unitreport": ["templates/**"]},
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering :: Visualization",
],
python_requires=">=3.6",
install_requires=["jinja2", "markdown", "matplotlib"],
)
|
flexible
|
{
"blob_id": "7a243f5e24d81d3395cc790dface5e795b9c04e6",
"index": 4495,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('README.md', 'r') as f:\n long_description = f.read()\nsetuptools.setup(name='unitreport', version='0.1.1', author='annahadji',\n author_email='[email protected]', description=\n 'A small unittest-based tool for generating single page html reports in Python.'\n , long_description=long_description, long_description_content_type=\n 'text/markdown', keywords=\n 'static unittest report generator Markdown plots tables', url=\n 'https://github.com/annahadji/unitreport', packages=['unitreport'],\n package_data={'unitreport': ['templates/**']}, classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License', 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Visualization'], python_requires=\n '>=3.6', install_requires=['jinja2', 'markdown', 'matplotlib'])\n",
"step-3": "<mask token>\nimport setuptools\nwith open('README.md', 'r') as f:\n long_description = f.read()\nsetuptools.setup(name='unitreport', version='0.1.1', author='annahadji',\n author_email='[email protected]', description=\n 'A small unittest-based tool for generating single page html reports in Python.'\n , long_description=long_description, long_description_content_type=\n 'text/markdown', keywords=\n 'static unittest report generator Markdown plots tables', url=\n 'https://github.com/annahadji/unitreport', packages=['unitreport'],\n package_data={'unitreport': ['templates/**']}, classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License', 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Visualization'], python_requires=\n '>=3.6', install_requires=['jinja2', 'markdown', 'matplotlib'])\n",
"step-4": "\"\"\"Distribution script for unitreport.\"\"\"\nimport setuptools\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetuptools.setup(\n name=\"unitreport\",\n version=\"0.1.1\",\n author=\"annahadji\",\n author_email=\"[email protected]\",\n description=\"A small unittest-based tool for generating single page html reports in Python.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"static unittest report generator Markdown plots tables\",\n url=\"https://github.com/annahadji/unitreport\",\n packages=[\"unitreport\"],\n package_data={\"unitreport\": [\"templates/**\"]},\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n ],\n python_requires=\">=3.6\",\n install_requires=[\"jinja2\", \"markdown\", \"matplotlib\"],\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Menu:
<|reserved_special_token_0|>
def get_menu(self, type, openid):
try:
if type == 'mine':
self.sql = (
"SELECT * FROM get_menu WHERE openid='%s' order by watch DESC "
% openid)
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
return {'code': 1007, 'menu_list': self.resql['alldata']}
else:
return {'code': -1}
elif type == 'main':
self.sql = 'SELECT * FROM get_menu order by watch DESC'
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
return {'code': 1007, 'top_list': self.resql['alldata']
[0:3], 'menu_list': self.resql['alldata'][3:-1]}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
<|reserved_special_token_0|>
def add_menu(self, data, openid):
try:
self.create_time = self.timeClass.get_time()
self.sql = (
"""
INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date)
VALUES ('%s','%s','%s','%s','%s','%s','%s')
"""
% (openid, data.title, data.photo, data.material, data.
accessories, data.ingredient, self.create_time))
self.resql = self.mysqlClass.add_insert(self.sql, '')
self.conn = self.resql['conn']
self.cur = self.resql['cur']
self.menu_id = self.cur.lastrowid
steps = json.loads(data.steps)
for step in steps:
print(step['num'])
self.sql2 = (
"""
INSERT INTO menu_step (menu_id,num,content,image,create_date)
VALUES (%s,%d,'%s','%s','%s')
"""
% (self.menu_id, step['num'], step['content'], step[
'image'], self.create_time))
self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)
self.conn = self.resql2['conn']
self.resql = self.mysqlClass.commit_inserst(self.conn)
if self.resql['state'] != 'E':
return {'code': 1010}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Menu:
def __init__(self):
self.mysqlClass = Mysql.MySQL()
self.timeClass = Utils.Time()
def get_menu(self, type, openid):
try:
if type == 'mine':
self.sql = (
"SELECT * FROM get_menu WHERE openid='%s' order by watch DESC "
% openid)
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
return {'code': 1007, 'menu_list': self.resql['alldata']}
else:
return {'code': -1}
elif type == 'main':
self.sql = 'SELECT * FROM get_menu order by watch DESC'
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
return {'code': 1007, 'top_list': self.resql['alldata']
[0:3], 'menu_list': self.resql['alldata'][3:-1]}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
<|reserved_special_token_0|>
def add_menu(self, data, openid):
try:
self.create_time = self.timeClass.get_time()
self.sql = (
"""
INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date)
VALUES ('%s','%s','%s','%s','%s','%s','%s')
"""
% (openid, data.title, data.photo, data.material, data.
accessories, data.ingredient, self.create_time))
self.resql = self.mysqlClass.add_insert(self.sql, '')
self.conn = self.resql['conn']
self.cur = self.resql['cur']
self.menu_id = self.cur.lastrowid
steps = json.loads(data.steps)
for step in steps:
print(step['num'])
self.sql2 = (
"""
INSERT INTO menu_step (menu_id,num,content,image,create_date)
VALUES (%s,%d,'%s','%s','%s')
"""
% (self.menu_id, step['num'], step['content'], step[
'image'], self.create_time))
self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)
self.conn = self.resql2['conn']
self.resql = self.mysqlClass.commit_inserst(self.conn)
if self.resql['state'] != 'E':
return {'code': 1010}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Menu:
def __init__(self):
self.mysqlClass = Mysql.MySQL()
self.timeClass = Utils.Time()
def get_menu(self, type, openid):
try:
if type == 'mine':
self.sql = (
"SELECT * FROM get_menu WHERE openid='%s' order by watch DESC "
% openid)
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
return {'code': 1007, 'menu_list': self.resql['alldata']}
else:
return {'code': -1}
elif type == 'main':
self.sql = 'SELECT * FROM get_menu order by watch DESC'
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
return {'code': 1007, 'top_list': self.resql['alldata']
[0:3], 'menu_list': self.resql['alldata'][3:-1]}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
def get_menu_info(self, menu_id):
try:
self.sql = 'SELECT * FROM menu WHERE id=%s' % menu_id
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
self.menu = self.resql['alldata'][0]
self.sql3 = 'UPDATE menu SET watch=%s where id=%s' % (self.
menu['watch'] + 1, menu_id)
self.resql3 = self.mysqlClass.insert_data(self.sql3)
print(self.resql3)
self.sql2 = (
'SELECT * FROM menu_step WHERE menu_id=%s order by num ASC '
% menu_id)
self.resql2 = self.mysqlClass.select_data(self.sql2)
self.step_list = []
if self.resql2['state'] != 'E':
for ai_menu_log in self.resql2['alldata']:
self.step_list.append(ai_menu_log)
self.menu['menu_step'] = self.step_list
return {'code': 1008, 'menu_info': self.menu}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
def add_menu(self, data, openid):
try:
self.create_time = self.timeClass.get_time()
self.sql = (
"""
INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date)
VALUES ('%s','%s','%s','%s','%s','%s','%s')
"""
% (openid, data.title, data.photo, data.material, data.
accessories, data.ingredient, self.create_time))
self.resql = self.mysqlClass.add_insert(self.sql, '')
self.conn = self.resql['conn']
self.cur = self.resql['cur']
self.menu_id = self.cur.lastrowid
steps = json.loads(data.steps)
for step in steps:
print(step['num'])
self.sql2 = (
"""
INSERT INTO menu_step (menu_id,num,content,image,create_date)
VALUES (%s,%d,'%s','%s','%s')
"""
% (self.menu_id, step['num'], step['content'], step[
'image'], self.create_time))
self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)
self.conn = self.resql2['conn']
self.resql = self.mysqlClass.commit_inserst(self.conn)
if self.resql['state'] != 'E':
return {'code': 1010}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
<|reserved_special_token_1|>
from jox_api import label_image, Mysql, Utils
from jox_config import api_base_url
import json
class Menu:
def __init__(self):
self.mysqlClass = Mysql.MySQL()
self.timeClass = Utils.Time()
def get_menu(self, type, openid):
try:
if type == 'mine':
self.sql = (
"SELECT * FROM get_menu WHERE openid='%s' order by watch DESC "
% openid)
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
return {'code': 1007, 'menu_list': self.resql['alldata']}
else:
return {'code': -1}
elif type == 'main':
self.sql = 'SELECT * FROM get_menu order by watch DESC'
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
return {'code': 1007, 'top_list': self.resql['alldata']
[0:3], 'menu_list': self.resql['alldata'][3:-1]}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
def get_menu_info(self, menu_id):
try:
self.sql = 'SELECT * FROM menu WHERE id=%s' % menu_id
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
self.menu = self.resql['alldata'][0]
self.sql3 = 'UPDATE menu SET watch=%s where id=%s' % (self.
menu['watch'] + 1, menu_id)
self.resql3 = self.mysqlClass.insert_data(self.sql3)
print(self.resql3)
self.sql2 = (
'SELECT * FROM menu_step WHERE menu_id=%s order by num ASC '
% menu_id)
self.resql2 = self.mysqlClass.select_data(self.sql2)
self.step_list = []
if self.resql2['state'] != 'E':
for ai_menu_log in self.resql2['alldata']:
self.step_list.append(ai_menu_log)
self.menu['menu_step'] = self.step_list
return {'code': 1008, 'menu_info': self.menu}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
def add_menu(self, data, openid):
try:
self.create_time = self.timeClass.get_time()
self.sql = (
"""
INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date)
VALUES ('%s','%s','%s','%s','%s','%s','%s')
"""
% (openid, data.title, data.photo, data.material, data.
accessories, data.ingredient, self.create_time))
self.resql = self.mysqlClass.add_insert(self.sql, '')
self.conn = self.resql['conn']
self.cur = self.resql['cur']
self.menu_id = self.cur.lastrowid
steps = json.loads(data.steps)
for step in steps:
print(step['num'])
self.sql2 = (
"""
INSERT INTO menu_step (menu_id,num,content,image,create_date)
VALUES (%s,%d,'%s','%s','%s')
"""
% (self.menu_id, step['num'], step['content'], step[
'image'], self.create_time))
self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)
self.conn = self.resql2['conn']
self.resql = self.mysqlClass.commit_inserst(self.conn)
if self.resql['state'] != 'E':
return {'code': 1010}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
<|reserved_special_token_1|>
from jox_api import label_image,Mysql,Utils
from jox_config import api_base_url
import json
class Menu():
def __init__(self):
self.mysqlClass = Mysql.MySQL()
self.timeClass = Utils.Time()
def get_menu(self,type,openid):
try:
if type == 'mine':
self.sql = "SELECT * FROM get_menu WHERE openid=\'%s\' order by watch DESC " % (openid)
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
return {'code': 1007, 'menu_list': self.resql['alldata'] }
else:
return {'code': -1}
elif type == 'main':
self.sql = "SELECT * FROM get_menu order by watch DESC"
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
return {'code': 1007, 'top_list': self.resql['alldata'][0:3],'menu_list': self.resql['alldata'][3:-1]}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
def get_menu_info(self,menu_id):
try:
self.sql = "SELECT * FROM menu WHERE id=%s" % (menu_id)
self.resql = self.mysqlClass.select_data(self.sql)
if self.resql['state'] != 'E':
self.menu= self.resql['alldata'][0]
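                # bump the view counter each time this recipe is opened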
self.sql3 = "UPDATE menu SET watch=%s where id=%s" %(self.menu['watch']+1,menu_id)
self.resql3 = self.mysqlClass.insert_data(self.sql3)
print(self.resql3)
self.sql2 = "SELECT * FROM menu_step WHERE menu_id=%s order by num ASC " % (menu_id)
self.resql2 = self.mysqlClass.select_data(self.sql2)
self.step_list = []
if self.resql2['state'] != 'E':
for ai_menu_log in self.resql2['alldata']:
self.step_list.append(ai_menu_log)
self.menu['menu_step'] = self.step_list
return {'code': 1008, 'menu_info': self.menu}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
def add_menu(self,data,openid):
try:
self.create_time = self.timeClass.get_time()
self.sql ='''
INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date)
VALUES (\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\')
'''% (openid, data.title,data.photo,data.material,data.accessories,data.ingredient, self.create_time)
self.resql = self.mysqlClass.add_insert(self.sql,"")
self.conn = self.resql['conn']
self.cur = self.resql['cur']
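            # lastrowid of the menu INSERT links each step row below to its parent menu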
self.menu_id = self.cur.lastrowid
steps = json.loads(data.steps)
for step in steps:
print(step['num'])
self.sql2 = '''
INSERT INTO menu_step (menu_id,num,content,image,create_date)
VALUES (%s,%d,\'%s\',\'%s\',\'%s\')
'''% (self.menu_id, step['num'],step['content'],step['image'], self.create_time)
self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)
self.conn = self.resql2['conn']
self.resql = self.mysqlClass.commit_inserst(self.conn)
if self.resql['state'] !='E':
return {'code': 1010}
else:
return {'code': -1}
except Exception as e:
print(str(e))
return {'code': -1}
|
flexible
|
{
"blob_id": "4fa9d16f979acf3edce05a209e1c6636e50fc315",
"index": 222,
"step-1": "<mask token>\n\n\nclass Menu:\n <mask token>\n\n def get_menu(self, type, openid):\n try:\n if type == 'mine':\n self.sql = (\n \"SELECT * FROM get_menu WHERE openid='%s' order by watch DESC \"\n % openid)\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'menu_list': self.resql['alldata']}\n else:\n return {'code': -1}\n elif type == 'main':\n self.sql = 'SELECT * FROM get_menu order by watch DESC'\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'top_list': self.resql['alldata']\n [0:3], 'menu_list': self.resql['alldata'][3:-1]}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n <mask token>\n\n def add_menu(self, data, openid):\n try:\n self.create_time = self.timeClass.get_time()\n self.sql = (\n \"\"\"\n INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date) \n VALUES ('%s','%s','%s','%s','%s','%s','%s')\n \"\"\"\n % (openid, data.title, data.photo, data.material, data.\n accessories, data.ingredient, self.create_time))\n self.resql = self.mysqlClass.add_insert(self.sql, '')\n self.conn = self.resql['conn']\n self.cur = self.resql['cur']\n self.menu_id = self.cur.lastrowid\n steps = json.loads(data.steps)\n for step in steps:\n print(step['num'])\n self.sql2 = (\n \"\"\"\n INSERT INTO menu_step (menu_id,num,content,image,create_date)\n VALUES (%s,%d,'%s','%s','%s')\n \"\"\"\n % (self.menu_id, step['num'], step['content'], step[\n 'image'], self.create_time))\n self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)\n self.conn = self.resql2['conn']\n self.resql = self.mysqlClass.commit_inserst(self.conn)\n if self.resql['state'] != 'E':\n return {'code': 1010}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n",
"step-2": "<mask token>\n\n\nclass Menu:\n\n def __init__(self):\n self.mysqlClass = Mysql.MySQL()\n self.timeClass = Utils.Time()\n\n def get_menu(self, type, openid):\n try:\n if type == 'mine':\n self.sql = (\n \"SELECT * FROM get_menu WHERE openid='%s' order by watch DESC \"\n % openid)\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'menu_list': self.resql['alldata']}\n else:\n return {'code': -1}\n elif type == 'main':\n self.sql = 'SELECT * FROM get_menu order by watch DESC'\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'top_list': self.resql['alldata']\n [0:3], 'menu_list': self.resql['alldata'][3:-1]}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n <mask token>\n\n def add_menu(self, data, openid):\n try:\n self.create_time = self.timeClass.get_time()\n self.sql = (\n \"\"\"\n INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date) \n VALUES ('%s','%s','%s','%s','%s','%s','%s')\n \"\"\"\n % (openid, data.title, data.photo, data.material, data.\n accessories, data.ingredient, self.create_time))\n self.resql = self.mysqlClass.add_insert(self.sql, '')\n self.conn = self.resql['conn']\n self.cur = self.resql['cur']\n self.menu_id = self.cur.lastrowid\n steps = json.loads(data.steps)\n for step in steps:\n print(step['num'])\n self.sql2 = (\n \"\"\"\n INSERT INTO menu_step (menu_id,num,content,image,create_date)\n VALUES (%s,%d,'%s','%s','%s')\n \"\"\"\n % (self.menu_id, step['num'], step['content'], step[\n 'image'], self.create_time))\n self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)\n self.conn = self.resql2['conn']\n self.resql = self.mysqlClass.commit_inserst(self.conn)\n if self.resql['state'] != 'E':\n return {'code': 1010}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n",
"step-3": "<mask token>\n\n\nclass Menu:\n\n def __init__(self):\n self.mysqlClass = Mysql.MySQL()\n self.timeClass = Utils.Time()\n\n def get_menu(self, type, openid):\n try:\n if type == 'mine':\n self.sql = (\n \"SELECT * FROM get_menu WHERE openid='%s' order by watch DESC \"\n % openid)\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'menu_list': self.resql['alldata']}\n else:\n return {'code': -1}\n elif type == 'main':\n self.sql = 'SELECT * FROM get_menu order by watch DESC'\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'top_list': self.resql['alldata']\n [0:3], 'menu_list': self.resql['alldata'][3:-1]}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n\n def get_menu_info(self, menu_id):\n try:\n self.sql = 'SELECT * FROM menu WHERE id=%s' % menu_id\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n self.menu = self.resql['alldata'][0]\n self.sql3 = 'UPDATE menu SET watch=%s where id=%s' % (self.\n menu['watch'] + 1, menu_id)\n self.resql3 = self.mysqlClass.insert_data(self.sql3)\n print(self.resql3)\n self.sql2 = (\n 'SELECT * FROM menu_step WHERE menu_id=%s order by num ASC '\n % menu_id)\n self.resql2 = self.mysqlClass.select_data(self.sql2)\n self.step_list = []\n if self.resql2['state'] != 'E':\n for ai_menu_log in self.resql2['alldata']:\n self.step_list.append(ai_menu_log)\n self.menu['menu_step'] = self.step_list\n return {'code': 1008, 'menu_info': self.menu}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n\n def add_menu(self, data, openid):\n try:\n self.create_time = self.timeClass.get_time()\n self.sql = (\n \"\"\"\n INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date) \n VALUES ('%s','%s','%s','%s','%s','%s','%s')\n \"\"\"\n % (openid, data.title, data.photo, data.material, data.\n accessories, data.ingredient, self.create_time))\n self.resql = self.mysqlClass.add_insert(self.sql, '')\n self.conn = self.resql['conn']\n self.cur = self.resql['cur']\n self.menu_id = self.cur.lastrowid\n steps = json.loads(data.steps)\n for step in steps:\n print(step['num'])\n self.sql2 = (\n \"\"\"\n INSERT INTO menu_step (menu_id,num,content,image,create_date)\n VALUES (%s,%d,'%s','%s','%s')\n \"\"\"\n % (self.menu_id, step['num'], step['content'], step[\n 'image'], self.create_time))\n self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)\n self.conn = self.resql2['conn']\n self.resql = self.mysqlClass.commit_inserst(self.conn)\n if self.resql['state'] != 'E':\n return {'code': 1010}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n",
"step-4": "from jox_api import label_image, Mysql, Utils\nfrom jox_config import api_base_url\nimport json\n\n\nclass Menu:\n\n def __init__(self):\n self.mysqlClass = Mysql.MySQL()\n self.timeClass = Utils.Time()\n\n def get_menu(self, type, openid):\n try:\n if type == 'mine':\n self.sql = (\n \"SELECT * FROM get_menu WHERE openid='%s' order by watch DESC \"\n % openid)\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'menu_list': self.resql['alldata']}\n else:\n return {'code': -1}\n elif type == 'main':\n self.sql = 'SELECT * FROM get_menu order by watch DESC'\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'top_list': self.resql['alldata']\n [0:3], 'menu_list': self.resql['alldata'][3:-1]}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n\n def get_menu_info(self, menu_id):\n try:\n self.sql = 'SELECT * FROM menu WHERE id=%s' % menu_id\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n self.menu = self.resql['alldata'][0]\n self.sql3 = 'UPDATE menu SET watch=%s where id=%s' % (self.\n menu['watch'] + 1, menu_id)\n self.resql3 = self.mysqlClass.insert_data(self.sql3)\n print(self.resql3)\n self.sql2 = (\n 'SELECT * FROM menu_step WHERE menu_id=%s order by num ASC '\n % menu_id)\n self.resql2 = self.mysqlClass.select_data(self.sql2)\n self.step_list = []\n if self.resql2['state'] != 'E':\n for ai_menu_log in self.resql2['alldata']:\n self.step_list.append(ai_menu_log)\n self.menu['menu_step'] = self.step_list\n return {'code': 1008, 'menu_info': self.menu}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n\n def add_menu(self, data, openid):\n try:\n self.create_time = self.timeClass.get_time()\n self.sql = (\n \"\"\"\n INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date) \n VALUES ('%s','%s','%s','%s','%s','%s','%s')\n \"\"\"\n % (openid, data.title, data.photo, data.material, data.\n accessories, data.ingredient, self.create_time))\n self.resql = self.mysqlClass.add_insert(self.sql, '')\n self.conn = self.resql['conn']\n self.cur = self.resql['cur']\n self.menu_id = self.cur.lastrowid\n steps = json.loads(data.steps)\n for step in steps:\n print(step['num'])\n self.sql2 = (\n \"\"\"\n INSERT INTO menu_step (menu_id,num,content,image,create_date)\n VALUES (%s,%d,'%s','%s','%s')\n \"\"\"\n % (self.menu_id, step['num'], step['content'], step[\n 'image'], self.create_time))\n self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)\n self.conn = self.resql2['conn']\n self.resql = self.mysqlClass.commit_inserst(self.conn)\n if self.resql['state'] != 'E':\n return {'code': 1010}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n",
"step-5": "from jox_api import label_image,Mysql,Utils\nfrom jox_config import api_base_url\nimport json\nclass Menu():\n def __init__(self):\n self.mysqlClass = Mysql.MySQL()\n self.timeClass = Utils.Time()\n\n def get_menu(self,type,openid):\n try:\n if type == 'mine':\n self.sql = \"SELECT * FROM get_menu WHERE openid=\\'%s\\' order by watch DESC \" % (openid)\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n\n return {'code': 1007, 'menu_list': self.resql['alldata'] }\n else:\n return {'code': -1}\n elif type == 'main':\n self.sql = \"SELECT * FROM get_menu order by watch DESC\"\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n return {'code': 1007, 'top_list': self.resql['alldata'][0:3],'menu_list': self.resql['alldata'][3:-1]}\n else:\n return {'code': -1}\n except Exception as e:\n print(str(e))\n return {'code': -1}\n\n def get_menu_info(self,menu_id):\n try:\n self.sql = \"SELECT * FROM menu WHERE id=%s\" % (menu_id)\n self.resql = self.mysqlClass.select_data(self.sql)\n if self.resql['state'] != 'E':\n self.menu= self.resql['alldata'][0]\n self.sql3 = \"UPDATE menu SET watch=%s where id=%s\" %(self.menu['watch']+1,menu_id)\n self.resql3 = self.mysqlClass.insert_data(self.sql3)\n print(self.resql3)\n self.sql2 = \"SELECT * FROM menu_step WHERE menu_id=%s order by num ASC \" % (menu_id)\n self.resql2 = self.mysqlClass.select_data(self.sql2)\n self.step_list = []\n if self.resql2['state'] != 'E':\n for ai_menu_log in self.resql2['alldata']:\n self.step_list.append(ai_menu_log)\n self.menu['menu_step'] = self.step_list\n return {'code': 1008, 'menu_info': self.menu}\n else:\n return {'code': -1}\n\n except Exception as e:\n print(str(e))\n return {'code': -1}\n\n def add_menu(self,data,openid):\n try:\n self.create_time = self.timeClass.get_time()\n\n self.sql ='''\n INSERT INTO menu (openid,title,photo,material,accessories,ingredient,create_date) \n VALUES (\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\')\n '''% (openid, data.title,data.photo,data.material,data.accessories,data.ingredient, self.create_time)\n\n self.resql = self.mysqlClass.add_insert(self.sql,\"\")\n self.conn = self.resql['conn']\n self.cur = self.resql['cur']\n self.menu_id = self.cur.lastrowid\n steps = json.loads(data.steps)\n\n for step in steps:\n print(step['num'])\n self.sql2 = '''\n INSERT INTO menu_step (menu_id,num,content,image,create_date)\n VALUES (%s,%d,\\'%s\\',\\'%s\\',\\'%s\\')\n '''% (self.menu_id, step['num'],step['content'],step['image'], self.create_time)\n self.resql2 = self.mysqlClass.add_insert(self.sql2, self.conn)\n self.conn = self.resql2['conn']\n self.resql = self.mysqlClass.commit_inserst(self.conn)\n if self.resql['state'] !='E':\n return {'code': 1010}\n else:\n return {'code': -1}\n\n except Exception as e:\n print(str(e))\n return {'code': -1}",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
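Editor's note: every query in the Menu record above is built with Python string interpolation, so a quote in openid or any form field breaks the SQL and opens the code to injection. A minimal sketch of the parameterized alternative, assuming a plain mysql-connector/DB-API connection (the project's Mysql.MySQL wrapper is not part of this sketch and may expose a different interface):

import mysql.connector

def get_menu_for_owner(openid):
    # Hypothetical connection parameters; the point is the parameter binding below.
    conn = mysql.connector.connect(user='jox', database='jox')
    cur = conn.cursor(dictionary=True)  # dictionary=True is mysql.connector-specific
    cur.execute(
        'SELECT * FROM get_menu WHERE openid = %s ORDER BY watch DESC',
        (openid,),  # bound by the driver, so quotes in openid cannot break the SQL
    )
    return cur.fetchall()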
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('./all-news.json') as f:
allNews = json.load(f)
<|reserved_special_token_0|>
with open('./recent-news.js', 'w') as f:
f.write("document.write('\\\n")
f.write('<ul>\\\n')
for value in allNews.values():
f.write('<li>\\\n')
date, content = value['date'], value['content']
date = date.replace("'", "\\'")
content = content.replace("'", "\\'")
f.write(date + ' - ' + content + '\\\n')
f.write('</li>\\\n')
recent_news_counter += 1
if recent_news_counter >= RECENT_NEWS_COUNT:
break
f.write('</ul>\\\n')
f.write("');")
with open('./all-news.js', 'w') as f:
f.write("document.write('\\\n")
f.write('<ul>\\\n')
for value in allNews.values():
f.write('<li>\\\n')
date, content = value['date'], value['content']
date = date.replace("'", "\\'")
content = content.replace("'", "\\'")
f.write(date + ' - ' + content + '\\\n')
f.write('</li>\\\n')
f.write('</ul>\\\n')
f.write("');")
<|reserved_special_token_1|>
RECENT_NEWS_COUNT = 5
<|reserved_special_token_0|>
with open('./all-news.json') as f:
allNews = json.load(f)
recent_news_counter = 0
with open('./recent-news.js', 'w') as f:
f.write("document.write('\\\n")
f.write('<ul>\\\n')
for value in allNews.values():
f.write('<li>\\\n')
date, content = value['date'], value['content']
date = date.replace("'", "\\'")
content = content.replace("'", "\\'")
f.write(date + ' - ' + content + '\\\n')
f.write('</li>\\\n')
recent_news_counter += 1
if recent_news_counter >= RECENT_NEWS_COUNT:
break
f.write('</ul>\\\n')
f.write("');")
with open('./all-news.js', 'w') as f:
f.write("document.write('\\\n")
f.write('<ul>\\\n')
for value in allNews.values():
f.write('<li>\\\n')
date, content = value['date'], value['content']
date = date.replace("'", "\\'")
content = content.replace("'", "\\'")
f.write(date + ' - ' + content + '\\\n')
f.write('</li>\\\n')
f.write('</ul>\\\n')
f.write("');")
<|reserved_special_token_1|>
RECENT_NEWS_COUNT = 5
import json
with open('./all-news.json') as f:
allNews = json.load(f)
recent_news_counter = 0
with open('./recent-news.js', 'w') as f:
f.write("document.write('\\\n")
f.write('<ul>\\\n')
for value in allNews.values():
f.write('<li>\\\n')
date, content = value['date'], value['content']
date = date.replace("'", "\\'")
content = content.replace("'", "\\'")
f.write(date + ' - ' + content + '\\\n')
f.write('</li>\\\n')
recent_news_counter += 1
if recent_news_counter >= RECENT_NEWS_COUNT:
break
f.write('</ul>\\\n')
f.write("');")
with open('./all-news.js', 'w') as f:
f.write("document.write('\\\n")
f.write('<ul>\\\n')
for value in allNews.values():
f.write('<li>\\\n')
date, content = value['date'], value['content']
date = date.replace("'", "\\'")
content = content.replace("'", "\\'")
f.write(date + ' - ' + content + '\\\n')
f.write('</li>\\\n')
f.write('</ul>\\\n')
f.write("');")
<|reserved_special_token_1|>
# written by Mohammad Shahrad @UBC
RECENT_NEWS_COUNT = 5
import json
with open("./all-news.json") as f:
allNews = json.load(f)
recent_news_counter = 0
with open("./recent-news.js", "w") as f:
f.write("document.write('\\\n")
f.write("<ul>\\\n")
for value in allNews.values():
f.write("<li>\\\n")
date, content = value["date"], value["content"]
date = date.replace("'", "\\'")
content = content.replace("'", "\\'")
f.write(date + " - " + content + "\\\n")
f.write("</li>\\\n")
recent_news_counter += 1
if recent_news_counter >= RECENT_NEWS_COUNT:
break
f.write("</ul>\\\n")
f.write("');")
with open("./all-news.js", "w") as f:
f.write("document.write('\\\n")
f.write("<ul>\\\n")
for value in allNews.values():
f.write("<li>\\\n")
date, content = value["date"], value["content"]
date = date.replace("'", "\\'")
content = content.replace("'", "\\'")
f.write(date + " - " + content + "\\\n")
f.write("</li>\\\n")
f.write("</ul>\\\n")
f.write("');")
|
flexible
|
{
"blob_id": "6097840cdf4b42efaca3e197f88703d927abe889",
"index": 2548,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('./all-news.json') as f:\n allNews = json.load(f)\n<mask token>\nwith open('./recent-news.js', 'w') as f:\n f.write(\"document.write('\\\\\\n\")\n f.write('<ul>\\\\\\n')\n for value in allNews.values():\n f.write('<li>\\\\\\n')\n date, content = value['date'], value['content']\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + ' - ' + content + '\\\\\\n')\n f.write('</li>\\\\\\n')\n recent_news_counter += 1\n if recent_news_counter >= RECENT_NEWS_COUNT:\n break\n f.write('</ul>\\\\\\n')\n f.write(\"');\")\nwith open('./all-news.js', 'w') as f:\n f.write(\"document.write('\\\\\\n\")\n f.write('<ul>\\\\\\n')\n for value in allNews.values():\n f.write('<li>\\\\\\n')\n date, content = value['date'], value['content']\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + ' - ' + content + '\\\\\\n')\n f.write('</li>\\\\\\n')\n f.write('</ul>\\\\\\n')\n f.write(\"');\")\n",
"step-3": "RECENT_NEWS_COUNT = 5\n<mask token>\nwith open('./all-news.json') as f:\n allNews = json.load(f)\nrecent_news_counter = 0\nwith open('./recent-news.js', 'w') as f:\n f.write(\"document.write('\\\\\\n\")\n f.write('<ul>\\\\\\n')\n for value in allNews.values():\n f.write('<li>\\\\\\n')\n date, content = value['date'], value['content']\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + ' - ' + content + '\\\\\\n')\n f.write('</li>\\\\\\n')\n recent_news_counter += 1\n if recent_news_counter >= RECENT_NEWS_COUNT:\n break\n f.write('</ul>\\\\\\n')\n f.write(\"');\")\nwith open('./all-news.js', 'w') as f:\n f.write(\"document.write('\\\\\\n\")\n f.write('<ul>\\\\\\n')\n for value in allNews.values():\n f.write('<li>\\\\\\n')\n date, content = value['date'], value['content']\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + ' - ' + content + '\\\\\\n')\n f.write('</li>\\\\\\n')\n f.write('</ul>\\\\\\n')\n f.write(\"');\")\n",
"step-4": "RECENT_NEWS_COUNT = 5\nimport json\nwith open('./all-news.json') as f:\n allNews = json.load(f)\nrecent_news_counter = 0\nwith open('./recent-news.js', 'w') as f:\n f.write(\"document.write('\\\\\\n\")\n f.write('<ul>\\\\\\n')\n for value in allNews.values():\n f.write('<li>\\\\\\n')\n date, content = value['date'], value['content']\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + ' - ' + content + '\\\\\\n')\n f.write('</li>\\\\\\n')\n recent_news_counter += 1\n if recent_news_counter >= RECENT_NEWS_COUNT:\n break\n f.write('</ul>\\\\\\n')\n f.write(\"');\")\nwith open('./all-news.js', 'w') as f:\n f.write(\"document.write('\\\\\\n\")\n f.write('<ul>\\\\\\n')\n for value in allNews.values():\n f.write('<li>\\\\\\n')\n date, content = value['date'], value['content']\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + ' - ' + content + '\\\\\\n')\n f.write('</li>\\\\\\n')\n f.write('</ul>\\\\\\n')\n f.write(\"');\")\n",
"step-5": "# written by Mohammad Shahrad @UBC\n\nRECENT_NEWS_COUNT = 5\n\nimport json\n\nwith open(\"./all-news.json\") as f:\n allNews = json.load(f)\n\nrecent_news_counter = 0\nwith open(\"./recent-news.js\", \"w\") as f:\n f.write(\"document.write('\\\\\\n\")\n f.write(\"<ul>\\\\\\n\")\n for value in allNews.values():\n f.write(\"<li>\\\\\\n\")\n date, content = value[\"date\"], value[\"content\"]\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + \" - \" + content + \"\\\\\\n\")\n f.write(\"</li>\\\\\\n\")\n recent_news_counter += 1\n if recent_news_counter >= RECENT_NEWS_COUNT:\n break\n f.write(\"</ul>\\\\\\n\")\n f.write(\"');\")\n\nwith open(\"./all-news.js\", \"w\") as f:\n f.write(\"document.write('\\\\\\n\")\n f.write(\"<ul>\\\\\\n\")\n for value in allNews.values():\n f.write(\"<li>\\\\\\n\")\n date, content = value[\"date\"], value[\"content\"]\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + \" - \" + content + \"\\\\\\n\")\n f.write(\"</li>\\\\\\n\")\n f.write(\"</ul>\\\\\\n\")\n f.write(\"');\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
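Editor's note: the news-generator record above escapes only single quotes by hand, so a backslash or raw newline inside a news item would still emit broken JavaScript. A sketch of a sturdier variant that delegates all escaping to json.dumps (the output filename news-data.js is hypothetical):

import json

with open('./all-news.json') as f:
    all_news = json.load(f)

# json.dumps escapes quotes, backslashes and newlines in every item, so the
# page can render the list with a few lines of client-side JS instead of
# hand-assembled document.write strings.
with open('./news-data.js', 'w') as f:
    f.write('var NEWS_ITEMS = ' + json.dumps(list(all_news.values())) + ';\n')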
x = str(input("please input your name:"))
y = int(input("please input your age:"))
p = int(2017-y+100)
print("your name is:"+x)
print (p)
|
normal
|
{
"blob_id": "929f580e8e559f8309e19f72208bf4ff0d537668",
"index": 4935,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('your name is:' + x)\nprint(p)\n",
"step-3": "x = str(input('please input your name:'))\ny = int(input('please input your age:'))\np = int(2017 - y + 100)\nprint('your name is:' + x)\nprint(p)\n",
"step-4": "x = str(input(\"please input your name:\"))\ny = int(input(\"please input your age:\"))\n\np = int(2017-y+100)\n\nprint(\"your name is:\"+x)\nprint (p)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
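Editor's note: the snippet above hard-codes the year 2017, so its "year you turn 100" answer went stale immediately. A sketch anchored to the clock instead; the arithmetic is unchanged:

from datetime import date

name = input('please input your name:')
age = int(input('please input your age:'))
print('your name is: ' + name)
print(date.today().year - age + 100)  # year this person turns 100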
<|reserved_special_token_0|>
class HDF5_Parser(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HDF5_Parser(object):
<|reserved_special_token_0|>
plugin_name = 'hdf5.read'
plugin_descript = 'read *.hdf5 (in read mode) files using h5py'
file_regex = '*.hdf5'
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HDF5_Parser(object):
"""
Examples
--------
>>> import h5py
>>> indata = h5py.File('test.hdf5')
>>> dataset = indata.create_dataset("mydataset", (10,), dtype='i')
>>> indata.close()
>>> with open('test.hdf5') as f:
... data = HDF5_Parser().read_file(f)
>>> data['mydataset'][:]
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)
>>> import os
>>> os.remove('test.hdf5')
"""
plugin_name = 'hdf5.read'
plugin_descript = 'read *.hdf5 (in read mode) files using h5py'
file_regex = '*.hdf5'
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
<|reserved_special_token_1|>
import h5py
class HDF5_Parser(object):
"""
Examples
--------
>>> import h5py
>>> indata = h5py.File('test.hdf5')
>>> dataset = indata.create_dataset("mydataset", (10,), dtype='i')
>>> indata.close()
>>> with open('test.hdf5') as f:
... data = HDF5_Parser().read_file(f)
>>> data['mydataset'][:]
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)
>>> import os
>>> os.remove('test.hdf5')
"""
plugin_name = 'hdf5.read'
plugin_descript = 'read *.hdf5 (in read mode) files using h5py'
file_regex = '*.hdf5'
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
<|reserved_special_token_1|>
#!/usr/bin/env python
import h5py
class HDF5_Parser(object): # noqa: N801
"""
Examples
--------
>>> import h5py
>>> indata = h5py.File('test.hdf5')
>>> dataset = indata.create_dataset("mydataset", (10,), dtype='i')
>>> indata.close()
>>> with open('test.hdf5') as f:
... data = HDF5_Parser().read_file(f)
>>> data['mydataset'][:]
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)
>>> import os
>>> os.remove('test.hdf5')
"""
plugin_name = 'hdf5.read'
plugin_descript = 'read *.hdf5 (in read mode) files using h5py'
file_regex = '*.hdf5'
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
|
flexible
|
{
"blob_id": "0beb5c5c5db9247d66a5a49cfff7282ead52a9b7",
"index": 716,
"step-1": "<mask token>\n\n\nclass HDF5_Parser(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def read_file(self, file_obj, **kwargs):\n return h5py.File(file_obj.name, mode='r')\n",
"step-2": "<mask token>\n\n\nclass HDF5_Parser(object):\n <mask token>\n plugin_name = 'hdf5.read'\n plugin_descript = 'read *.hdf5 (in read mode) files using h5py'\n file_regex = '*.hdf5'\n\n def read_file(self, file_obj, **kwargs):\n return h5py.File(file_obj.name, mode='r')\n",
"step-3": "<mask token>\n\n\nclass HDF5_Parser(object):\n \"\"\"\n\n Examples\n --------\n\n >>> import h5py\n >>> indata = h5py.File('test.hdf5')\n >>> dataset = indata.create_dataset(\"mydataset\", (10,), dtype='i')\n >>> indata.close()\n\n >>> with open('test.hdf5') as f:\n ... data = HDF5_Parser().read_file(f)\n >>> data['mydataset'][:]\n array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)\n\n >>> import os\n >>> os.remove('test.hdf5')\n\n \"\"\"\n plugin_name = 'hdf5.read'\n plugin_descript = 'read *.hdf5 (in read mode) files using h5py'\n file_regex = '*.hdf5'\n\n def read_file(self, file_obj, **kwargs):\n return h5py.File(file_obj.name, mode='r')\n",
"step-4": "import h5py\n\n\nclass HDF5_Parser(object):\n \"\"\"\n\n Examples\n --------\n\n >>> import h5py\n >>> indata = h5py.File('test.hdf5')\n >>> dataset = indata.create_dataset(\"mydataset\", (10,), dtype='i')\n >>> indata.close()\n\n >>> with open('test.hdf5') as f:\n ... data = HDF5_Parser().read_file(f)\n >>> data['mydataset'][:]\n array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)\n\n >>> import os\n >>> os.remove('test.hdf5')\n\n \"\"\"\n plugin_name = 'hdf5.read'\n plugin_descript = 'read *.hdf5 (in read mode) files using h5py'\n file_regex = '*.hdf5'\n\n def read_file(self, file_obj, **kwargs):\n return h5py.File(file_obj.name, mode='r')\n",
"step-5": "#!/usr/bin/env python\n\nimport h5py\n\n\nclass HDF5_Parser(object): # noqa: N801\n \"\"\"\n\n Examples\n --------\n\n >>> import h5py\n >>> indata = h5py.File('test.hdf5')\n >>> dataset = indata.create_dataset(\"mydataset\", (10,), dtype='i')\n >>> indata.close()\n\n >>> with open('test.hdf5') as f:\n ... data = HDF5_Parser().read_file(f)\n >>> data['mydataset'][:]\n array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)\n\n >>> import os\n >>> os.remove('test.hdf5')\n\n \"\"\"\n\n plugin_name = 'hdf5.read'\n plugin_descript = 'read *.hdf5 (in read mode) files using h5py'\n file_regex = '*.hdf5'\n\n def read_file(self, file_obj, **kwargs):\n return h5py.File(file_obj.name, mode='r')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
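Editor's note: read_file in the record above returns an open h5py.File that the caller must remember to close. h5py files are context managers, so a usage sketch that closes the handle deterministically (file and dataset names mirror the record's doctest):

import h5py

with h5py.File('test.hdf5', mode='r') as f:
    data = f['mydataset'][:]  # the [:] slice copies values out before the file closes
print(data)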
def emphasize(sentence):
words = sentence.split(" ")
for i, word in enumerate(words):
words[i] = word[0].upper() + word[1:].lower()
return " ".join(words)
exp1 = "Hello World"
ans1 = emphasize("hello world")
assert ans1 == exp1, f"expected {exp1}, got {ans1}"
exp2 = "Good Morning"
ans2 = emphasize("GOOD MORNING")
assert ans2 == exp2, f"expected {exp2}, got {ans2}"
exp3 = "99 Red Balloons!"
ans3 = emphasize("99 red balloons!")
assert ans3 == exp3, f"expected {exp3}, got {ans3}"
print("everything okay")
|
normal
|
{
"blob_id": "518dcdca8f5e6b42624083e4327143dfba59b2ba",
"index": 9785,
"step-1": "<mask token>\n",
"step-2": "def emphasize(sentence):\n words = sentence.split(' ')\n for i, word in enumerate(words):\n words[i] = word[0].upper() + word[1:].lower()\n return ' '.join(words)\n\n\n<mask token>\n",
"step-3": "def emphasize(sentence):\n words = sentence.split(' ')\n for i, word in enumerate(words):\n words[i] = word[0].upper() + word[1:].lower()\n return ' '.join(words)\n\n\n<mask token>\nassert ans1 == exp1, f'expected {exp1}, got {ans1}'\n<mask token>\nassert ans2 == exp2, f'expected {exp2}, got {ans2}'\n<mask token>\nassert ans3 == exp3, f'expected {exp3}, got {ans3}'\nprint('everything okay')\n",
"step-4": "def emphasize(sentence):\n words = sentence.split(' ')\n for i, word in enumerate(words):\n words[i] = word[0].upper() + word[1:].lower()\n return ' '.join(words)\n\n\nexp1 = 'Hello World'\nans1 = emphasize('hello world')\nassert ans1 == exp1, f'expected {exp1}, got {ans1}'\nexp2 = 'Good Morning'\nans2 = emphasize('GOOD MORNING')\nassert ans2 == exp2, f'expected {exp2}, got {ans2}'\nexp3 = '99 Red Balloons!'\nans3 = emphasize('99 red balloons!')\nassert ans3 == exp3, f'expected {exp3}, got {ans3}'\nprint('everything okay')\n",
"step-5": "def emphasize(sentence):\n words = sentence.split(\" \")\n for i, word in enumerate(words):\n words[i] = word[0].upper() + word[1:].lower()\n return \" \".join(words)\n\n\nexp1 = \"Hello World\"\nans1 = emphasize(\"hello world\")\nassert ans1 == exp1, f\"expected {exp1}, got {ans1}\"\n\nexp2 = \"Good Morning\"\nans2 = emphasize(\"GOOD MORNING\")\nassert ans2 == exp2, f\"expected {exp2}, got {ans2}\"\n\nexp3 = \"99 Red Balloons!\"\nans3 = emphasize(\"99 red balloons!\")\nassert ans3 == exp3, f\"expected {exp3}, got {ans3}\"\n\nprint(\"everything okay\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
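Editor's note: the emphasize record above raises IndexError on an empty token (for example a double space), and the standard library already covers the job. A sketch using string.capwords, which passes all three asserts in the record; note it also collapses runs of whitespace, a small behavioral difference:

import string

def emphasize(sentence):
    # capwords lower-cases each whitespace-separated word, then capitalizes it.
    return string.capwords(sentence)

assert emphasize('hello world') == 'Hello World'
assert emphasize('GOOD MORNING') == 'Good Morning'
assert emphasize('99 red balloons!') == '99 Red Balloons!'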
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hilma import Mesh, loadPly, savePly
mesh = Mesh()
loadPly("head.ply", mesh)
verts = []
faces = []
edges = []
uvs = []
for v in mesh.getVertices():
verts.append( (v.x, v.y, v.z) )
for t in mesh.getTrianglesIndices():
faces.append( (t.x, t.y, t.z ) )
for e in mesh.getLinesIndices():
edges.append( (e.x, e.y) )
# print( verts )
# print( faces )
# print(edges)
savePly("out.ply", mesh, False)
|
normal
|
{
"blob_id": "c02af2ecd980da4ceff133c13072ad7c6b724041",
"index": 5329,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nloadPly('head.ply', mesh)\n<mask token>\nfor v in mesh.getVertices():\n verts.append((v.x, v.y, v.z))\nfor t in mesh.getTrianglesIndices():\n faces.append((t.x, t.y, t.z))\nfor e in mesh.getLinesIndices():\n edges.append((e.x, e.y))\nsavePly('out.ply', mesh, False)\n",
"step-3": "<mask token>\nmesh = Mesh()\nloadPly('head.ply', mesh)\nverts = []\nfaces = []\nedges = []\nuvs = []\nfor v in mesh.getVertices():\n verts.append((v.x, v.y, v.z))\nfor t in mesh.getTrianglesIndices():\n faces.append((t.x, t.y, t.z))\nfor e in mesh.getLinesIndices():\n edges.append((e.x, e.y))\nsavePly('out.ply', mesh, False)\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom hilma import Mesh, loadPly, savePly\nmesh = Mesh()\nloadPly('head.ply', mesh)\nverts = []\nfaces = []\nedges = []\nuvs = []\nfor v in mesh.getVertices():\n verts.append((v.x, v.y, v.z))\nfor t in mesh.getTrianglesIndices():\n faces.append((t.x, t.y, t.z))\nfor e in mesh.getLinesIndices():\n edges.append((e.x, e.y))\nsavePly('out.ply', mesh, False)\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom hilma import Mesh, loadPly, savePly\n\nmesh = Mesh()\nloadPly(\"head.ply\", mesh)\n\nverts = []\nfaces = []\nedges = []\nuvs = []\n\n\nfor v in mesh.getVertices():\n verts.append( (v.x, v.y, v.z) )\n\nfor t in mesh.getTrianglesIndices():\n faces.append( (t.x, t.y, t.z ) )\n\nfor e in mesh.getLinesIndices():\n edges.append( (e.x, e.y) )\n\n# print( verts )\n# print( faces )\n# print(edges)\n\nsavePly(\"out.ply\", mesh, False)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
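Editor's note: the hilma record gathers vertices and face indices into plain Python lists one tuple at a time. If the goal is to hand the geometry to NumPy, the conversion is one call per list; a sketch with placeholder data (plain NumPy, no additional hilma API assumed):

import numpy as np

verts = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]  # stand-in for mesh.getVertices()
faces = [(0, 1, 2)]                                          # stand-in for triangle indices

verts_np = np.asarray(verts, dtype=np.float64)  # shape (V, 3)
faces_np = np.asarray(faces, dtype=np.int64)    # shape (F, 3)
print(verts_np.shape, faces_np.shape)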
from django.conf import settings
from django.contrib import messages
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic import DetailView, ListView, View
from assessments.models import (Mine, Company,
QuestionCategory, Question, Assessment, Response)
class Home(View):
def get(self, request):
return render(request, 'home.html')
class MineList(ListView):
model = Mine
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['maps_api_key'] = settings.GOOGLEMAPS_API_KEY
return context
class MineDetail(DetailView):
model = Mine
class AssessmentList(ListView):
model = Assessment
class AssessmentDetail(DetailView):
model = Assessment
class AnswerQuestions(ListView):
model = Question
def post(self, request):
company, mine, assessment = self.get_assessment(
request)
for key, value in request.POST.items():
print(key, value)
self.create_response(key, value, assessment)
self.add_null_responses(assessment)
messages.success(request,
'Assessment Received; Thank You!')
return redirect(reverse('assessment_detail',
kwargs={'pk':assessment.id}))
def get_assessment(self, request):
company, created = Company.objects.get_or_create(
name=request.POST.get('company')
)
mine, created = Mine.objects.get_or_create(
name=request.POST.get('mine'),
company=company,
location=request.POST.get('location')
)
assessment = Assessment.objects.create(
mine=mine,
)
if request.user.is_authenticated:
assessment.user =request.user
assessment.save()
return company, mine, assessment
def create_response(self, key, value, assessment):
try:
question = Question.objects.get(id=int(key))
response = Response.objects.create(
question=question,
response=self.get_response(value),
assessment=assessment
)
except Exception as error:
print(error)
def get_response(self, response):
if response == 'True':
return True
else:
return False
def add_null_responses(self, assessment):
remaining_questions = Question.objects.exclude(
response__assessment=assessment).distinct()
for question in remaining_questions:
Response.objects.create(
assessment=assessment,
question=question,
)
|
normal
|
{
"blob_id": "d296e528d399ee772039777d139a1d8271711ee9",
"index": 2146,
"step-1": "<mask token>\n\n\nclass AssessmentList(ListView):\n model = Assessment\n\n\nclass AssessmentDetail(DetailView):\n model = Assessment\n\n\nclass AnswerQuestions(ListView):\n model = Question\n\n def post(self, request):\n company, mine, assessment = self.get_assessment(request)\n for key, value in request.POST.items():\n print(key, value)\n self.create_response(key, value, assessment)\n self.add_null_responses(assessment)\n messages.success(request, 'Assessment Received; Thank You!')\n return redirect(reverse('assessment_detail', kwargs={'pk':\n assessment.id}))\n\n def get_assessment(self, request):\n company, created = Company.objects.get_or_create(name=request.POST.\n get('company'))\n mine, created = Mine.objects.get_or_create(name=request.POST.get(\n 'mine'), company=company, location=request.POST.get('location'))\n assessment = Assessment.objects.create(mine=mine)\n if request.user.is_authenticated:\n assessment.user = request.user\n assessment.save()\n return company, mine, assessment\n\n def create_response(self, key, value, assessment):\n try:\n question = Question.objects.get(id=int(key))\n response = Response.objects.create(question=question, response=\n self.get_response(value), assessment=assessment)\n except Exception as error:\n print(error)\n\n def get_response(self, response):\n if response == 'True':\n return True\n else:\n return False\n\n def add_null_responses(self, assessment):\n remaining_questions = Question.objects.exclude(response__assessment\n =assessment).distinct()\n for question in remaining_questions:\n Response.objects.create(assessment=assessment, question=question)\n",
"step-2": "<mask token>\n\n\nclass MineList(ListView):\n <mask token>\n <mask token>\n\n\nclass MineDetail(DetailView):\n model = Mine\n\n\nclass AssessmentList(ListView):\n model = Assessment\n\n\nclass AssessmentDetail(DetailView):\n model = Assessment\n\n\nclass AnswerQuestions(ListView):\n model = Question\n\n def post(self, request):\n company, mine, assessment = self.get_assessment(request)\n for key, value in request.POST.items():\n print(key, value)\n self.create_response(key, value, assessment)\n self.add_null_responses(assessment)\n messages.success(request, 'Assessment Received; Thank You!')\n return redirect(reverse('assessment_detail', kwargs={'pk':\n assessment.id}))\n\n def get_assessment(self, request):\n company, created = Company.objects.get_or_create(name=request.POST.\n get('company'))\n mine, created = Mine.objects.get_or_create(name=request.POST.get(\n 'mine'), company=company, location=request.POST.get('location'))\n assessment = Assessment.objects.create(mine=mine)\n if request.user.is_authenticated:\n assessment.user = request.user\n assessment.save()\n return company, mine, assessment\n\n def create_response(self, key, value, assessment):\n try:\n question = Question.objects.get(id=int(key))\n response = Response.objects.create(question=question, response=\n self.get_response(value), assessment=assessment)\n except Exception as error:\n print(error)\n\n def get_response(self, response):\n if response == 'True':\n return True\n else:\n return False\n\n def add_null_responses(self, assessment):\n remaining_questions = Question.objects.exclude(response__assessment\n =assessment).distinct()\n for question in remaining_questions:\n Response.objects.create(assessment=assessment, question=question)\n",
"step-3": "<mask token>\n\n\nclass MineList(ListView):\n model = Mine\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['maps_api_key'] = settings.GOOGLEMAPS_API_KEY\n return context\n\n\nclass MineDetail(DetailView):\n model = Mine\n\n\nclass AssessmentList(ListView):\n model = Assessment\n\n\nclass AssessmentDetail(DetailView):\n model = Assessment\n\n\nclass AnswerQuestions(ListView):\n model = Question\n\n def post(self, request):\n company, mine, assessment = self.get_assessment(request)\n for key, value in request.POST.items():\n print(key, value)\n self.create_response(key, value, assessment)\n self.add_null_responses(assessment)\n messages.success(request, 'Assessment Received; Thank You!')\n return redirect(reverse('assessment_detail', kwargs={'pk':\n assessment.id}))\n\n def get_assessment(self, request):\n company, created = Company.objects.get_or_create(name=request.POST.\n get('company'))\n mine, created = Mine.objects.get_or_create(name=request.POST.get(\n 'mine'), company=company, location=request.POST.get('location'))\n assessment = Assessment.objects.create(mine=mine)\n if request.user.is_authenticated:\n assessment.user = request.user\n assessment.save()\n return company, mine, assessment\n\n def create_response(self, key, value, assessment):\n try:\n question = Question.objects.get(id=int(key))\n response = Response.objects.create(question=question, response=\n self.get_response(value), assessment=assessment)\n except Exception as error:\n print(error)\n\n def get_response(self, response):\n if response == 'True':\n return True\n else:\n return False\n\n def add_null_responses(self, assessment):\n remaining_questions = Question.objects.exclude(response__assessment\n =assessment).distinct()\n for question in remaining_questions:\n Response.objects.create(assessment=assessment, question=question)\n",
"step-4": "<mask token>\n\n\nclass Home(View):\n <mask token>\n\n\nclass MineList(ListView):\n model = Mine\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['maps_api_key'] = settings.GOOGLEMAPS_API_KEY\n return context\n\n\nclass MineDetail(DetailView):\n model = Mine\n\n\nclass AssessmentList(ListView):\n model = Assessment\n\n\nclass AssessmentDetail(DetailView):\n model = Assessment\n\n\nclass AnswerQuestions(ListView):\n model = Question\n\n def post(self, request):\n company, mine, assessment = self.get_assessment(request)\n for key, value in request.POST.items():\n print(key, value)\n self.create_response(key, value, assessment)\n self.add_null_responses(assessment)\n messages.success(request, 'Assessment Received; Thank You!')\n return redirect(reverse('assessment_detail', kwargs={'pk':\n assessment.id}))\n\n def get_assessment(self, request):\n company, created = Company.objects.get_or_create(name=request.POST.\n get('company'))\n mine, created = Mine.objects.get_or_create(name=request.POST.get(\n 'mine'), company=company, location=request.POST.get('location'))\n assessment = Assessment.objects.create(mine=mine)\n if request.user.is_authenticated:\n assessment.user = request.user\n assessment.save()\n return company, mine, assessment\n\n def create_response(self, key, value, assessment):\n try:\n question = Question.objects.get(id=int(key))\n response = Response.objects.create(question=question, response=\n self.get_response(value), assessment=assessment)\n except Exception as error:\n print(error)\n\n def get_response(self, response):\n if response == 'True':\n return True\n else:\n return False\n\n def add_null_responses(self, assessment):\n remaining_questions = Question.objects.exclude(response__assessment\n =assessment).distinct()\n for question in remaining_questions:\n Response.objects.create(assessment=assessment, question=question)\n",
"step-5": "from django.conf import settings\nfrom django.contrib import messages\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.views.generic import DetailView, ListView, View\n\nfrom assessments.models import (Mine, Company,\n QuestionCategory, Question, Assessment, Response)\n\nclass Home(View):\n\n def get(self, request):\n return render(request, 'home.html')\n\nclass MineList(ListView):\n model = Mine\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['maps_api_key'] = settings.GOOGLEMAPS_API_KEY\n return context\n\nclass MineDetail(DetailView):\n model = Mine\n\nclass AssessmentList(ListView):\n model = Assessment\n\nclass AssessmentDetail(DetailView):\n model = Assessment\n\nclass AnswerQuestions(ListView):\n model = Question\n\n def post(self, request):\n company, mine, assessment = self.get_assessment(\n request)\n for key, value in request.POST.items():\n print(key, value)\n self.create_response(key, value, assessment)\n self.add_null_responses(assessment)\n messages.success(request, \n 'Assessment Received; Thank You!')\n return redirect(reverse('assessment_detail', \n kwargs={'pk':assessment.id}))\n\n def get_assessment(self, request):\n company, created = Company.objects.get_or_create(\n name=request.POST.get('company')\n )\n mine, created = Mine.objects.get_or_create(\n name=request.POST.get('mine'),\n company=company,\n location=request.POST.get('location')\n )\n assessment = Assessment.objects.create(\n mine=mine,\n )\n if request.user.is_authenticated:\n assessment.user =request.user\n assessment.save()\n return company, mine, assessment\n\n def create_response(self, key, value, assessment):\n try:\n question = Question.objects.get(id=int(key))\n response = Response.objects.create(\n question=question,\n response=self.get_response(value),\n assessment=assessment\n )\n except Exception as error:\n print(error)\n\n def get_response(self, response):\n if response == 'True':\n return True\n else:\n return False\n\n def add_null_responses(self, assessment):\n remaining_questions = Question.objects.exclude(\n response__assessment=assessment).distinct()\n for question in remaining_questions:\n Response.objects.create(\n assessment=assessment,\n question=question,\n )\n\n\n",
"step-ids": [
11,
14,
16,
17,
20
]
}
|
[
11,
14,
16,
17,
20
] |
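Editor's note: add_null_responses in the Django record above issues one INSERT per unanswered question. bulk_create collapses that into a single query; a sketch against the same models as the record:

from assessments.models import Question, Response

def add_null_responses(assessment):
    remaining = Question.objects.exclude(
        response__assessment=assessment).distinct()
    # One INSERT for all placeholder responses instead of one per question.
    Response.objects.bulk_create(
        Response(assessment=assessment, question=q) for q in remaining
    )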
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
def weights_init(m):
if type(m) == nn.Linear:
m.weight.data.normal_(0.0, 1e-3)
m.bias.data.fill_(0.)
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
#--------------------------------
# Device configuration
#--------------------------------
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device: %s'%device)
#--------------------------------
# Hyper-parameters
#--------------------------------
input_size = 3
num_classes = 10
hidden_size = [128, 512, 512, 512, 512]
num_epochs = 20
batch_size = 200
learning_rate = 2e-3
learning_rate_decay = 0.95
reg=0.001
num_training= 49000
num_validation =1000
norm_layer = None #norm_layer="BN"
print(hidden_size)
dropout_p = 0 #probability of dropout
#-------------------------------------------------
# Load the CIFAR-10 dataset
#-------------------------------------------------
#################################################################################
# TODO: Q3.a Choose the right data augmentation transforms with the right #
# hyper-parameters and put them in the data_aug_transforms variable #
#################################################################################
data_aug_transforms = []
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
data_aug_transforms += [transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomRotation(2),
transforms.RandomGrayscale(),
transforms.ColorJitter(brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05),
transforms.RandomAffine(0, translate=[0.2,0.2], scale=None, shear=0, resample=False, fillcolor=0),
]
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
norm_transform = transforms.Compose(data_aug_transforms+[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
cifar_dataset = torchvision.datasets.CIFAR10(root='datasets/',
train=True,
transform=norm_transform,
download=True)
test_dataset = torchvision.datasets.CIFAR10(root='datasets/',
train=False,
transform=test_transform
)
#-------------------------------------------------
# Prepare the training and validation splits
#-------------------------------------------------
mask = list(range(num_training))
train_dataset = torch.utils.data.Subset(cifar_dataset, mask)
mask = list(range(num_training, num_training + num_validation))
val_dataset = torch.utils.data.Subset(cifar_dataset, mask)
#-------------------------------------------------
# Data loader
#-------------------------------------------------
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=batch_size,
shuffle=False)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
#-------------------------------------------------
# Convolutional neural network (Q1.a and Q2.a)
# Set norm_layer for different networks whether using batch normalization
#-------------------------------------------------
class ConvNet(nn.Module):
def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None):
super(ConvNet, self).__init__()
#################################################################################
# TODO: Initialize the modules required to implement the convolutional layer #
# described in the exercise. #
# For Q1.a make use of conv2d and relu layers from the torch.nn module. #
# For Q2.a make use of BatchNorm2d layer from the torch.nn module. #
# For Q3.b Use Dropout layer from the torch.nn module. #
#################################################################################
layers = []
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# First ConvBlock with input size (i.e. C=3) and first hidden layer(i.e. 128)
layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3, stride=1, padding=1))
layers.append(nn.Dropout(dropout_p))
if norm_layer=="BN":
layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05, momentum=0.1,
affine=True, track_running_stats=True))
layers.append(nn.ReLU())
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
# Adding the other blocks
for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):
layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1, padding=1))
layers.append(nn.Dropout(dropout_p))
if norm_layer=="BN":
layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,
affine=True, track_running_stats=True))
layers.append(nn.ReLU())
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
# stacking convolutional blocks
self.ConvBlocks = nn.Sequential(*layers)
self.Dout = hidden_layers[-1]
# Fully connected layer
self.Dense = nn.Linear(hidden_layers[-1], num_classes)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
def forward(self, x):
#################################################################################
# TODO: Implement the forward pass computations #
#################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
out = self.ConvBlocks(x)
out = out.view(-1, 512)
out = self.Dense(out)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return out
#-------------------------------------------------
# Calculate the model size (Q1.b)
# if disp is true, print the model parameters, otherwise, only return the number of parameters.
#-------------------------------------------------
def PrintModelSize(model, disp=True):
#################################################################################
# TODO: Implement the function to count the number of trainable parameters in #
# the input model. This useful to track the capacity of the model you are #
# training #
#################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
model_sz = 0
for parameter in model.parameters():
model_sz += parameter.nelement()
if disp == True:
print("\nNumber of parameters: ", model_sz)
print("\n")
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return model_sz
#-------------------------------------------------
# Calculate the model size (Q1.c)
# visualize the convolution filters of the first convolution layer of the input model
#-------------------------------------------------
def VisualizeFilter(model):
#################################################################################
# TODO: Implement the functiont to visualize the weights in the first conv layer#
# in the model. Visualize them as a single image of stacked filters. #
# You can use matlplotlib.imshow to visualize an image in python #
#################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
kernel_map = np.zeros((7*4 + 3, 15*4 + 3, 3))
kernels = list(model.parameters())[0]
kernels = kernels.to("cpu")
kernels = kernels.data.numpy()
kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())
cnt = 0
for i in range(0, 8*4,4):
for j in range(0, 16*4, 4):
kernel_map[i:i+3, j:j+3, :] = kernels[cnt]
cnt = cnt + 1
plt.figure(figsize=(20, 10))
plt.imshow(kernel_map)
plt.show()
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
#======================================================================================
# Q1.a: Implementing convolutional neural net in PyTorch
#======================================================================================
# In this question we will implement a convolutional neural networks using the PyTorch
# library. Please complete the code for the ConvNet class evaluating the model
#--------------------------------------------------------------------------------------
model = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer).to(device)
# Q2.a - Initialize the model with correct batch norm layer
model.apply(weights_init)
# Print the model
print(model)
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
break
# Print model size
#======================================================================================
# Q1.b: Implementing the function to count the number of trainable parameters in the model
#======================================================================================
PrintModelSize(model)
#======================================================================================
# Q1.a: Implementing the function to visualize the filters in the first conv layers.
# Visualize the filters before training
#======================================================================================
#VisualizeFilter(model)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=reg)
# Train the model
lr = learning_rate
total_step = len(train_loader)
loss_train = []
loss_val = []
best_accuracy = 0
accuracy_val = []
best_model = type(model)(input_size, hidden_size, num_classes, norm_layer=norm_layer) # get a new instance
#best_model = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer)
for epoch in range(num_epochs):
model.train()
loss_iter = 0
for i, (images, labels) in enumerate(train_loader):
# Move tensors to the configured device
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_iter += loss.item()
if (i+1) % 100 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
loss_train.append(loss_iter/(len(train_loader)*batch_size))
# Code to update the lr
lr *= learning_rate_decay
update_lr(optimizer, lr)
model.eval()
with torch.no_grad():
correct = 0
total = 0
loss_iter = 0
for images, labels in val_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss = criterion(outputs, labels)
loss_iter += loss.item()
loss_val.append(loss_iter/(len(val_loader)*batch_size))
accuracy = 100 * correct / total
accuracy_val.append(accuracy)
print('Validation accuracy is: {} %'.format(accuracy))
#################################################################################
# TODO: Q2.b Implement the early stopping mechanism to save the model which has #
# the model with the best validation accuracy so-far (use best_model). #
#################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
if accuracy > best_accuracy:
best_model.load_state_dict(model.state_dict())
best_accuracy=accuracy
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
model.eval()
plt.figure(2)
plt.plot(loss_train, 'r', label='Train loss')
plt.plot(loss_val, 'g', label='Val loss')
plt.legend()
plt.show()
plt.figure(3)
plt.plot(accuracy_val, 'r', label='Val accuracy')
plt.legend()
plt.show()
#################################################################################
# TODO: Q2.b Implement the early stopping mechanism to load the weights from the#
# best model so far and perform testing with this model. #
#################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
model.load_state_dict(best_model.state_dict())
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
#Compute accuracy on the test set
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
if total == 1000:
break
print('Accuracy of the network on the {} test images: {} %'.format(total, 100 * correct / total))
# Q1.c: Implementing the function to visualize the filters in the first conv layers.
# Visualize the filters before training
VisualizeFilter(model)
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
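# Editor's note: PrintModelSize above counts every parameter, while the Q1.b
# comment asks for trainable ones; the two counts coincide here only because
# no layer is frozen. A sketch that filters explicitly:
def count_trainable_params(model):
    # Only tensors with requires_grad=True are updated by the optimizer.
    return sum(p.numel() for p in model.parameters() if p.requires_grad)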
|
normal
|
{
"blob_id": "0553bd4c7261197a1a80c5551305a16e7bfdc761",
"index": 2398,
"step-1": "<mask token>\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 0.001)\n m.bias.data.fill_(0.0)\n\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n<mask token>\n\n\nclass ConvNet(nn.Module):\n\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None\n ):\n super(ConvNet, self).__init__()\n layers = []\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3,\n stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1,\n padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n def forward(self, x):\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n return out\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 0.001)\n m.bias.data.fill_(0.0)\n\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n<mask token>\n\n\nclass ConvNet(nn.Module):\n\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None\n ):\n super(ConvNet, self).__init__()\n layers = []\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3,\n stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1,\n padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n def forward(self, x):\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n return out\n\n\n<mask token>\n\n\ndef VisualizeFilter(model):\n kernel_map = np.zeros((7 * 4 + 3, 15 * 4 + 3, 3))\n kernels = list(model.parameters())[0]\n kernels = kernels.to('cpu')\n kernels = kernels.data.numpy()\n kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())\n cnt = 0\n for i in range(0, 8 * 4, 4):\n for j in range(0, 16 * 4, 4):\n kernel_map[i:i + 3, j:j + 3, :] = kernels[cnt]\n cnt = cnt + 1\n plt.figure(figsize=(20, 10))\n plt.imshow(kernel_map)\n plt.show()\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 0.001)\n m.bias.data.fill_(0.0)\n\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n<mask token>\nprint('Using device: %s' % device)\n<mask token>\nprint(hidden_size)\n<mask token>\ndata_aug_transforms += [transforms.RandomCrop(32, padding=4), transforms.\n RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.\n RandomRotation(2), transforms.RandomGrayscale(), transforms.ColorJitter\n (brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05), transforms.\n RandomAffine(0, translate=[0.2, 0.2], scale=None, shear=0, resample=\n False, fillcolor=0)]\n<mask token>\n\n\nclass ConvNet(nn.Module):\n\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None\n ):\n super(ConvNet, self).__init__()\n layers = []\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3,\n stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1,\n padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n def forward(self, x):\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n return out\n\n\ndef PrintModelSize(model, disp=True):\n model_sz = 0\n for parameter in model.parameters():\n model_sz += parameter.nelement()\n if disp == True:\n print('\\nNumber of parameters: ', model_sz)\n print('\\n')\n return model_sz\n\n\ndef VisualizeFilter(model):\n kernel_map = np.zeros((7 * 4 + 3, 15 * 4 + 3, 3))\n kernels = list(model.parameters())[0]\n kernels = kernels.to('cpu')\n kernels = kernels.data.numpy()\n kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())\n cnt = 0\n for i in range(0, 8 * 4, 4):\n for j in range(0, 16 * 4, 4):\n kernel_map[i:i + 3, j:j + 3, :] = kernels[cnt]\n cnt = cnt + 1\n plt.figure(figsize=(20, 10))\n plt.imshow(kernel_map)\n plt.show()\n pass\n\n\n<mask token>\nmodel.apply(weights_init)\nprint(model)\nfor i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n break\nPrintModelSize(model)\n<mask token>\nfor epoch in range(num_epochs):\n model.train()\n loss_iter = 0\n for i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n loss = criterion(outputs, labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_iter += loss.item()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\n loss_train.append(loss_iter / (len(train_loader) * batch_size))\n lr *= learning_rate_decay\n update_lr(optimizer, lr)\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n loss_iter = 0\n for images, labels in val_loader:\n images = images.to(device)\n labels = 
labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n loss = criterion(outputs, labels)\n loss_iter += loss.item()\n loss_val.append(loss_iter / (len(val_loader) * batch_size))\n accuracy = 100 * correct / total\n accuracy_val.append(accuracy)\n print('Validation accuracy is: {} %'.format(accuracy))\n if accuracy > best_accuracy:\n best_model.load_state_dict(model.state_dict())\n best_accuracy = accuracy\nmodel.eval()\nplt.figure(2)\nplt.plot(loss_train, 'r', label='Train loss')\nplt.plot(loss_val, 'g', label='Val loss')\nplt.legend()\nplt.show()\nplt.figure(3)\nplt.plot(accuracy_val, 'r', label='Val accuracy')\nplt.legend()\nplt.show()\nmodel.load_state_dict(best_model.state_dict())\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if total == 1000:\n break\n print('Accuracy of the network on the {} test images: {} %'.format(\n total, 100 * correct / total))\nVisualizeFilter(model)\ntorch.save(model.state_dict(), 'model.ckpt')\n",
"step-4": "<mask token>\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 0.001)\n m.bias.data.fill_(0.0)\n\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint('Using device: %s' % device)\ninput_size = 3\nnum_classes = 10\nhidden_size = [128, 512, 512, 512, 512]\nnum_epochs = 20\nbatch_size = 200\nlearning_rate = 0.002\nlearning_rate_decay = 0.95\nreg = 0.001\nnum_training = 49000\nnum_validation = 1000\nnorm_layer = None\nprint(hidden_size)\ndropout_p = 0\ndata_aug_transforms = []\ndata_aug_transforms += [transforms.RandomCrop(32, padding=4), transforms.\n RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.\n RandomRotation(2), transforms.RandomGrayscale(), transforms.ColorJitter\n (brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05), transforms.\n RandomAffine(0, translate=[0.2, 0.2], scale=None, shear=0, resample=\n False, fillcolor=0)]\nnorm_transform = transforms.Compose(data_aug_transforms + [transforms.\n ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\ntest_transform = transforms.Compose([transforms.ToTensor(), transforms.\n Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\ncifar_dataset = torchvision.datasets.CIFAR10(root='datasets/', train=True,\n transform=norm_transform, download=True)\ntest_dataset = torchvision.datasets.CIFAR10(root='datasets/', train=False,\n transform=test_transform)\nmask = list(range(num_training))\ntrain_dataset = torch.utils.data.Subset(cifar_dataset, mask)\nmask = list(range(num_training, num_training + num_validation))\nval_dataset = torch.utils.data.Subset(cifar_dataset, mask)\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size, shuffle=True)\nval_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=\n batch_size, shuffle=False)\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=\n batch_size, shuffle=False)\n\n\nclass ConvNet(nn.Module):\n\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None\n ):\n super(ConvNet, self).__init__()\n layers = []\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3,\n stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1,\n padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n def forward(self, x):\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n return out\n\n\ndef PrintModelSize(model, disp=True):\n model_sz = 0\n for parameter in model.parameters():\n model_sz += parameter.nelement()\n if disp == True:\n print('\\nNumber of parameters: ', model_sz)\n print('\\n')\n return model_sz\n\n\ndef VisualizeFilter(model):\n kernel_map = np.zeros((7 * 4 + 3, 15 * 4 + 3, 3))\n kernels = 
list(model.parameters())[0]\n kernels = kernels.to('cpu')\n kernels = kernels.data.numpy()\n kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())\n cnt = 0\n for i in range(0, 8 * 4, 4):\n for j in range(0, 16 * 4, 4):\n kernel_map[i:i + 3, j:j + 3, :] = kernels[cnt]\n cnt = cnt + 1\n plt.figure(figsize=(20, 10))\n plt.imshow(kernel_map)\n plt.show()\n pass\n\n\nmodel = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer\n ).to(device)\nmodel.apply(weights_init)\nprint(model)\nfor i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n break\nPrintModelSize(model)\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,\n weight_decay=reg)\nlr = learning_rate\ntotal_step = len(train_loader)\nloss_train = []\nloss_val = []\nbest_accuracy = 0\naccuracy_val = []\nbest_model = type(model)(input_size, hidden_size, num_classes, norm_layer=\n norm_layer)\nfor epoch in range(num_epochs):\n model.train()\n loss_iter = 0\n for i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n loss = criterion(outputs, labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_iter += loss.item()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\n loss_train.append(loss_iter / (len(train_loader) * batch_size))\n lr *= learning_rate_decay\n update_lr(optimizer, lr)\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n loss_iter = 0\n for images, labels in val_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n loss = criterion(outputs, labels)\n loss_iter += loss.item()\n loss_val.append(loss_iter / (len(val_loader) * batch_size))\n accuracy = 100 * correct / total\n accuracy_val.append(accuracy)\n print('Validation accuracy is: {} %'.format(accuracy))\n if accuracy > best_accuracy:\n best_model.load_state_dict(model.state_dict())\n best_accuracy = accuracy\nmodel.eval()\nplt.figure(2)\nplt.plot(loss_train, 'r', label='Train loss')\nplt.plot(loss_val, 'g', label='Val loss')\nplt.legend()\nplt.show()\nplt.figure(3)\nplt.plot(accuracy_val, 'r', label='Val accuracy')\nplt.legend()\nplt.show()\nmodel.load_state_dict(best_model.state_dict())\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if total == 1000:\n break\n print('Accuracy of the network on the {} test images: {} %'.format(\n total, 100 * correct / total))\nVisualizeFilter(model)\ntorch.save(model.state_dict(), 'model.ckpt')\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 1e-3)\n m.bias.data.fill_(0.)\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n\n#--------------------------------\n# Device configuration\n#--------------------------------\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint('Using device: %s'%device)\n\n#--------------------------------\n# Hyper-parameters\n#--------------------------------\ninput_size = 3\nnum_classes = 10\nhidden_size = [128, 512, 512, 512, 512]\nnum_epochs = 20\nbatch_size = 200\nlearning_rate = 2e-3\nlearning_rate_decay = 0.95\nreg=0.001\nnum_training= 49000\nnum_validation =1000\nnorm_layer = None #norm_layer=\"BN\"\nprint(hidden_size)\n\ndropout_p = 0 #probability of dropout\n\n\n\n#-------------------------------------------------\n# Load the CIFAR-10 dataset\n#-------------------------------------------------\n#################################################################################\n# TODO: Q3.a Choose the right data augmentation transforms with the right #\n# hyper-parameters and put them in the data_aug_transforms variable #\n#################################################################################\ndata_aug_transforms = []\n# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\ndata_aug_transforms += [transforms.RandomCrop(32, padding=4), \n transforms.RandomHorizontalFlip(), \n transforms.RandomVerticalFlip(), \n transforms.RandomRotation(2),\n transforms.RandomGrayscale(),\n transforms.ColorJitter(brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05),\n transforms.RandomAffine(0, translate=[0.2,0.2], scale=None, shear=0, resample=False, fillcolor=0),\n ]\n\n# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\nnorm_transform = transforms.Compose(data_aug_transforms+[transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\ntest_transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\ncifar_dataset = torchvision.datasets.CIFAR10(root='datasets/',\n train=True,\n transform=norm_transform,\n download=True)\n\ntest_dataset = torchvision.datasets.CIFAR10(root='datasets/',\n train=False,\n transform=test_transform\n )\n\n#-------------------------------------------------\n# Prepare the training and validation splits\n#-------------------------------------------------\nmask = list(range(num_training))\ntrain_dataset = torch.utils.data.Subset(cifar_dataset, mask)\nmask = list(range(num_training, num_training + num_validation))\nval_dataset = torch.utils.data.Subset(cifar_dataset, mask)\n\n#-------------------------------------------------\n# Data loader\n#-------------------------------------------------\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True)\n\nval_loader = torch.utils.data.DataLoader(dataset=val_dataset,\n batch_size=batch_size,\n shuffle=False)\n\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n shuffle=False)\n\n\n#-------------------------------------------------\n# Convolutional neural network (Q1.a and Q2.a)\n# Set norm_layer for different networks whether using batch normalization\n#-------------------------------------------------\nclass 
ConvNet(nn.Module):\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None):\n super(ConvNet, self).__init__()\n #################################################################################\n # TODO: Initialize the modules required to implement the convolutional layer #\n # described in the exercise. #\n # For Q1.a make use of conv2d and relu layers from the torch.nn module. #\n # For Q2.a make use of BatchNorm2d layer from the torch.nn module. #\n # For Q3.b Use Dropout layer from the torch.nn module. #\n #################################################################################\n layers = []\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # First ConvBlock with input size (i.e. C=3) and first hidden layer(i.e. 128)\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3, stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer==\"BN\":\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05, momentum=0.1, \n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n\n # Adding the other blocks\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n \n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer==\"BN\":\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1, \n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n\t\t\n # stacking convolutional blocks\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n\n # Fully connected layer\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n def forward(self, x):\n #################################################################################\n # TODO: Implement the forward pass computations #\n #################################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n return out\n\n\n\n#-------------------------------------------------\n# Calculate the model size (Q1.b)\n# if disp is true, print the model parameters, otherwise, only return the number of parameters.\n#-------------------------------------------------\ndef PrintModelSize(model, disp=True):\n #################################################################################\n # TODO: Implement the function to count the number of trainable parameters in #\n # the input model. 
This useful to track the capacity of the model you are #\n # training #\n #################################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n model_sz = 0\n for parameter in model.parameters():\n model_sz += parameter.nelement()\n if disp == True:\n print(\"\\nNumber of parameters: \", model_sz)\n print(\"\\n\")\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n return model_sz\n\n\n\n#-------------------------------------------------\n# Calculate the model size (Q1.c)\n# visualize the convolution filters of the first convolution layer of the input model\n#-------------------------------------------------\ndef VisualizeFilter(model):\n #################################################################################\n # TODO: Implement the functiont to visualize the weights in the first conv layer#\n # in the model. Visualize them as a single image of stacked filters. #\n # You can use matlplotlib.imshow to visualize an image in python #\n #################################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n kernel_map = np.zeros((7*4 + 3, 15*4 + 3, 3))\n\n kernels = list(model.parameters())[0]\n kernels = kernels.to(\"cpu\")\n kernels = kernels.data.numpy()\n\n kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())\n\n cnt = 0\n for i in range(0, 8*4,4):\n for j in range(0, 16*4, 4):\n kernel_map[i:i+3, j:j+3, :] = kernels[cnt]\n cnt = cnt + 1\n\n plt.figure(figsize=(20, 10))\n plt.imshow(kernel_map)\n plt.show()\n\n pass\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n\n\n#======================================================================================\n# Q1.a: Implementing convolutional neural net in PyTorch\n#======================================================================================\n# In this question we will implement a convolutional neural networks using the PyTorch\n# library. 
Please complete the code for the ConvNet class evaluating the model\n#--------------------------------------------------------------------------------------\n\nmodel = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer).to(device)\n# Q2.a - Initialize the model with correct batch norm layer\n\nmodel.apply(weights_init)\n# Print the model\nprint(model)\n\nfor i, (images, labels) in enumerate(train_loader):\n\timages = images.to(device)\n\n\tbreak\n\n# Print model size\n#======================================================================================\n# Q1.b: Implementing the function to count the number of trainable parameters in the model\n#======================================================================================\nPrintModelSize(model)\n#======================================================================================\n# Q1.a: Implementing the function to visualize the filters in the first conv layers.\n# Visualize the filters before training\n#======================================================================================\n#VisualizeFilter(model)\n\n\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=reg)\n\n# Train the model\nlr = learning_rate\ntotal_step = len(train_loader)\nloss_train = []\nloss_val = []\nbest_accuracy = 0\naccuracy_val = []\nbest_model = type(model)(input_size, hidden_size, num_classes, norm_layer=norm_layer) # get a new instance\n#best_model = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer)\nfor epoch in range(num_epochs):\n\n model.train()\n\n loss_iter = 0\n for i, (images, labels) in enumerate(train_loader):\n # Move tensors to the configured device\n images = images.to(device)\n labels = labels.to(device)\n\n # Forward pass\n outputs = model(images)\n loss = criterion(outputs, labels)\n\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss_iter += loss.item()\n \n if (i+1) % 100 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'\n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n \n loss_train.append(loss_iter/(len(train_loader)*batch_size))\n\n \n # Code to update the lr\n lr *= learning_rate_decay\n update_lr(optimizer, lr)\n \n \n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n loss_iter = 0\n for images, labels in val_loader:\n images = images.to(device)\n labels = labels.to(device)\n \n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n \n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n \n loss = criterion(outputs, labels)\n loss_iter += loss.item()\n \n loss_val.append(loss_iter/(len(val_loader)*batch_size))\n\n accuracy = 100 * correct / total\n accuracy_val.append(accuracy)\n print('Validation accuracy is: {} %'.format(accuracy))\n #################################################################################\n # TODO: Q2.b Implement the early stopping mechanism to save the model which has #\n # the model with the best validation accuracy so-far (use best_model). 
#\n #################################################################################\n\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n if accuracy > best_accuracy:\n best_model.load_state_dict(model.state_dict())\n best_accuracy=accuracy\n \n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n \n\n# Test the model\n# In test phase, we don't need to compute gradients (for memory efficiency)\nmodel.eval()\n\n\n\nplt.figure(2)\nplt.plot(loss_train, 'r', label='Train loss')\nplt.plot(loss_val, 'g', label='Val loss')\nplt.legend()\nplt.show()\n\nplt.figure(3)\nplt.plot(accuracy_val, 'r', label='Val accuracy')\nplt.legend()\nplt.show()\n\n\n\n#################################################################################\n# TODO: Q2.b Implement the early stopping mechanism to load the weights from the#\n# best model so far and perform testing with this model. #\n#################################################################################\n# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\nmodel.load_state_dict(best_model.state_dict())\n# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n#Compute accuracy on the test set\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if total == 1000:\n break\n\n print('Accuracy of the network on the {} test images: {} %'.format(total, 100 * correct / total))\n\n\n\n# Q1.c: Implementing the function to visualize the filters in the first conv layers.\n# Visualize the filters before training\nVisualizeFilter(model)\n\n\n\n# Save the model checkpoint\ntorch.save(model.state_dict(), 'model.ckpt')\n\n",
"step-ids": [
5,
6,
8,
9,
11
]
}
|
[
5,
6,
8,
9,
11
] |
<|reserved_special_token_0|>
class RiskType(models.Model):
<|reserved_special_token_0|>
name = models.CharField(max_length=255)
created = models.DateTimeField(default=timezone.now)
modified = models.DateTimeField(auto_now=True)
class Meta:
ordering = 'name',
def __str__(self):
return self.name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RiskType(models.Model):
"""A model class used for storing data
about risk types
"""
name = models.CharField(max_length=255)
created = models.DateTimeField(default=timezone.now)
modified = models.DateTimeField(auto_now=True)
class Meta:
ordering = 'name',
def __str__(self):
return self.name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RiskType(models.Model):
"""A model class used for storing data
about risk types
"""
name = models.CharField(max_length=255)
created = models.DateTimeField(default=timezone.now)
modified = models.DateTimeField(auto_now=True)
class Meta:
ordering = 'name',
def __str__(self):
return self.name
eav.register(RiskType)
<|reserved_special_token_1|>
from django.db import models
import eav
from django.utils import timezone
class RiskType(models.Model):
"""A model class used for storing data
about risk types
"""
name = models.CharField(max_length=255)
created = models.DateTimeField(default=timezone.now)
modified = models.DateTimeField(auto_now=True)
class Meta:
ordering = 'name',
def __str__(self):
return self.name
eav.register(RiskType)
<|reserved_special_token_1|>
from django.db import models
import eav
from django.utils import timezone
class RiskType(models.Model):
"""A model class used for storing data
about risk types
"""
name = models.CharField(max_length=255)
created = models.DateTimeField(default=timezone.now)
modified = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
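# expose RiskType through django-eav: register() lets free-form (entity-attribute-value) attributes be attached to instances of this model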
eav.register(RiskType)
|
flexible
|
{
"blob_id": "635b75bc12718bccdfb9d04a54476c93fa4685ce",
"index": 4661,
"step-1": "<mask token>\n\n\nclass RiskType(models.Model):\n <mask token>\n name = models.CharField(max_length=255)\n created = models.DateTimeField(default=timezone.now)\n modified = models.DateTimeField(auto_now=True)\n\n\n class Meta:\n ordering = 'name',\n\n def __str__(self):\n return self.name\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RiskType(models.Model):\n \"\"\"A model class used for storing data\n about risk types\n \"\"\"\n name = models.CharField(max_length=255)\n created = models.DateTimeField(default=timezone.now)\n modified = models.DateTimeField(auto_now=True)\n\n\n class Meta:\n ordering = 'name',\n\n def __str__(self):\n return self.name\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RiskType(models.Model):\n \"\"\"A model class used for storing data\n about risk types\n \"\"\"\n name = models.CharField(max_length=255)\n created = models.DateTimeField(default=timezone.now)\n modified = models.DateTimeField(auto_now=True)\n\n\n class Meta:\n ordering = 'name',\n\n def __str__(self):\n return self.name\n\n\neav.register(RiskType)\n",
"step-4": "from django.db import models\nimport eav\nfrom django.utils import timezone\n\n\nclass RiskType(models.Model):\n \"\"\"A model class used for storing data\n about risk types\n \"\"\"\n name = models.CharField(max_length=255)\n created = models.DateTimeField(default=timezone.now)\n modified = models.DateTimeField(auto_now=True)\n\n\n class Meta:\n ordering = 'name',\n\n def __str__(self):\n return self.name\n\n\neav.register(RiskType)\n",
"step-5": "from django.db import models\nimport eav\nfrom django.utils import timezone\n\n\nclass RiskType(models.Model):\n \"\"\"A model class used for storing data\n about risk types\n \"\"\"\n name = models.CharField(max_length=255)\n created = models.DateTimeField(default=timezone.now)\n modified = models.DateTimeField(auto_now=True)\n\n class Meta:\n ordering = ('name',)\n\n def __str__(self):\n return self.name\n\n\neav.register(RiskType)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class TestUbuntuMock(TestOnUbuntu):
def _should_skip(self):
pass
def _dpkg_query_s(self):
from textwrap import dedent
if self._installed:
return Output(stdout=dedent(
"""
Package: sg3-utils
Status: installed ok installed
Priority: optional
Version: 1.30-1
Section: admin
"""
).encode('ascii'))
else:
return Output(stdout=dedent(
"""
dpkg-query: package sg3-utils is not installed and no information is available
Use dpkg --info (= dpkg-deb --info) to examine archive files,
and dpkg --contents (= dpkg-deb --contents) to list their contents.
"""
).encode('ascii'), returncode=1)
def _dpkg_query_l(self):
from textwrap import dedent
return Output(stdout=dedent(
"""
Desired=Unknown/Install/Remove/Purge/Hold
| Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend
|/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)
||/ Name Version Architecture Description
+++-===========================-==================-==================-===========================================================
{} sg3-utils 1.30-1 i386 utilities for devices using the SCSI command set
"""
.format('ii' if self._installed else 'un')).encode('ascii'))
<|reserved_special_token_0|>
def _apt_get_update(self):
return Output()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_check_unknown_package(self):
with self._apply_patches():
super(TestUbuntuMock, self).test_check_unknown_package()
def setUp(self):
self._installed = False
def _is_package_seems_to_be_installed(self, package_name, executable_name):
return self._installed
class TestRedHatMock(TestOnRedHat):
def _should_skip(self):
pass
def _rpm_query(self):
return Output(stdout=b'sg3_utils-1.25-5.el5' if self._installed else
b'package sg3_utils is not installed', returncode=0 if self.
_installed else 1)
def _yum_install(self):
self._installed = True
return Output()
@contextmanager
def _apply_patches(self):
with patch('infi.execute.execute') as execute:
def side_effect(*args, **kwargs):
command = args[0]
if '-q' in command:
return self._rpm_query()
elif 'install' in command:
return self._yum_install()
raise NotImplementedError()
execute.side_effect = side_effect
yield
def test_sg3_utils(self):
with self._apply_patches():
super(TestRedHatMock, self).test_sg3_utils()
pass
def setUp(self):
self._installed = False
def _is_package_seems_to_be_installed(self, package_name, executable_name):
return self._installed
class test_package_versioning(unittest.TestCase):
Solaris_v1 = b' VERSION: 6.0.100.000,REV=08.01.2012.09.00'
Solaris_v2 = b' VERSION: 5.14.2.5'
Ubuntu_v1 = b'Version: 0.4.9-3ubuntu7.2'
Ubuntu_v2 = b'Version: 1:1.2.8.dfsg-1ubuntu1'
rpm_v1 = b'4.8-7.el7'
rpm_v2 = b'18.168.6.1-34.el7'
def test_solaris_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Solaris_v1
patched().get_returncode.return_value = 0
result = SolarisPackageManager().get_installed_version(self.
Solaris_v1)
self.assertEqual(result, {'version': '6.0.100.000', 'revision':
'08.01.2012.09.00'})
def test_solaris_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Solaris_v2
patched().get_returncode.return_value = 0
result = SolarisPackageManager().get_installed_version(self.
Solaris_v2)
self.assertEqual(result, {'version': '5.14.2.5'})
def test_ubuntu_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Ubuntu_v1
patched().get_returncode.return_value = 0
result = UbuntuPackageManager().get_installed_version(self.
Ubuntu_v1)
self.assertEqual(result, {'version': '0.4.9-3ubuntu7.2'})
def test_ubuntu_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Ubuntu_v2
patched().get_returncode.return_value = 0
result = UbuntuPackageManager().get_installed_version(self.
Ubuntu_v2)
self.assertEqual(result, {'version': '1:1.2.8.dfsg-1ubuntu1'})
def test_rpm_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.rpm_v1
patched().get_returncode.return_value = 0
result = RpmMixin().get_installed_version(self.rpm_v1)
self.assertEqual(result, {'version': '4.8-7.el7'})
def test_rpm_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.rpm_v2
patched().get_returncode.return_value = 0
result = RpmMixin().get_installed_version(self.rpm_v2)
self.assertEqual(result, {'version': '18.168.6.1-34.el7'})
class GeneralTest(unittest.TestCase):
def _is_solaris(self):
from infi.os_info import get_platform_string
return get_platform_string().split('-')[0] == 'solaris'
def test_get_package_manager(self):
package_manager = pkgmgr.get_package_manager()
package_to_check = 'python'
if self._is_solaris():
package_to_check = 'CSW' + package_to_check
self.assertTrue(package_manager.is_package_installed(package_to_check))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Output(object):
def __init__(self, returncode=0, stdout='', stderr=''):
super(Output, self).__init__()
self._returncode = returncode
self._stdout = stdout
self._stderr = stderr
def get_stdout(self):
return self._stdout
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestUbuntuMock(TestOnUbuntu):
def _should_skip(self):
pass
def _dpkg_query_s(self):
from textwrap import dedent
if self._installed:
return Output(stdout=dedent(
"""
Package: sg3-utils
Status: installed ok installed
Priority: optional
Version: 1.30-1
Section: admin
"""
).encode('ascii'))
else:
return Output(stdout=dedent(
"""
dpkg-query: package sg3-utils is not installed and no information is available
Use dpkg --info (= dpkg-deb --info) to examine archive files,
and dpkg --contents (= dpkg-deb --contents) to list their contents.
"""
).encode('ascii'), returncode=1)
def _dpkg_query_l(self):
from textwrap import dedent
return Output(stdout=dedent(
"""
Desired=Unknown/Install/Remove/Purge/Hold
| Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend
|/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)
||/ Name Version Architecture Description
+++-===========================-==================-==================-===========================================================
{} sg3-utils 1.30-1 i386 utilities for devices using the SCSI command set
"""
.format('ii' if self._installed else 'un')).encode('ascii'))
def _apt_get_install(self):
self._installed = True
return Output()
def _apt_get_update(self):
return Output()
@contextmanager
def _apply_patches(self):
with patch('infi.execute.execute') as execute:
def side_effect(*args, **kwargs):
command = args[0]
if 'dpkg-query' in command:
if '-s' in command:
return self._dpkg_query_s()
if '-l' in command:
return self._dpkg_query_l()
elif 'apt-get install' in ' '.join(command):
return self._apt_get_install()
elif 'apt-get update' in ' '.join(command):
return self._apt_get_update()
raise NotImplementedError()
execute.side_effect = side_effect
yield
def test_sg3_utils(self):
with self._apply_patches():
super(TestUbuntuMock, self).test_sg3_utils()
def test_check_unknown_package(self):
with self._apply_patches():
super(TestUbuntuMock, self).test_check_unknown_package()
def setUp(self):
self._installed = False
def _is_package_seems_to_be_installed(self, package_name, executable_name):
return self._installed
class TestRedHatMock(TestOnRedHat):
def _should_skip(self):
pass
def _rpm_query(self):
return Output(stdout=b'sg3_utils-1.25-5.el5' if self._installed else
b'package sg3_utils is not installed', returncode=0 if self.
_installed else 1)
def _yum_install(self):
self._installed = True
return Output()
@contextmanager
def _apply_patches(self):
with patch('infi.execute.execute') as execute:
def side_effect(*args, **kwargs):
command = args[0]
if '-q' in command:
return self._rpm_query()
elif 'install' in command:
return self._yum_install()
raise NotImplementedError()
execute.side_effect = side_effect
yield
def test_sg3_utils(self):
with self._apply_patches():
super(TestRedHatMock, self).test_sg3_utils()
pass
def setUp(self):
self._installed = False
def _is_package_seems_to_be_installed(self, package_name, executable_name):
return self._installed
class test_package_versioning(unittest.TestCase):
Solaris_v1 = b' VERSION: 6.0.100.000,REV=08.01.2012.09.00'
Solaris_v2 = b' VERSION: 5.14.2.5'
Ubuntu_v1 = b'Version: 0.4.9-3ubuntu7.2'
Ubuntu_v2 = b'Version: 1:1.2.8.dfsg-1ubuntu1'
rpm_v1 = b'4.8-7.el7'
rpm_v2 = b'18.168.6.1-34.el7'
def test_solaris_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Solaris_v1
patched().get_returncode.return_value = 0
result = SolarisPackageManager().get_installed_version(self.
Solaris_v1)
self.assertEqual(result, {'version': '6.0.100.000', 'revision':
'08.01.2012.09.00'})
def test_solaris_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Solaris_v2
patched().get_returncode.return_value = 0
result = SolarisPackageManager().get_installed_version(self.
Solaris_v2)
self.assertEqual(result, {'version': '5.14.2.5'})
def test_ubuntu_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Ubuntu_v1
patched().get_returncode.return_value = 0
result = UbuntuPackageManager().get_installed_version(self.
Ubuntu_v1)
self.assertEqual(result, {'version': '0.4.9-3ubuntu7.2'})
def test_ubuntu_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Ubuntu_v2
patched().get_returncode.return_value = 0
result = UbuntuPackageManager().get_installed_version(self.
Ubuntu_v2)
self.assertEqual(result, {'version': '1:1.2.8.dfsg-1ubuntu1'})
def test_rpm_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.rpm_v1
patched().get_returncode.return_value = 0
result = RpmMixin().get_installed_version(self.rpm_v1)
self.assertEqual(result, {'version': '4.8-7.el7'})
def test_rpm_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.rpm_v2
patched().get_returncode.return_value = 0
result = RpmMixin().get_installed_version(self.rpm_v2)
self.assertEqual(result, {'version': '18.168.6.1-34.el7'})
class GeneralTest(unittest.TestCase):
def _is_solaris(self):
from infi.os_info import get_platform_string
return get_platform_string().split('-')[0] == 'solaris'
def test_get_package_manager(self):
package_manager = pkgmgr.get_package_manager()
package_to_check = 'python'
if self._is_solaris():
package_to_check = 'CSW' + package_to_check
self.assertTrue(package_manager.is_package_installed(package_to_check))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestOnRedHat(unittest.TestCase):
def _running_on_redhat(self):
return distro.id() == 'rhel'
def setUp(self):
super(TestOnRedHat, self).setUp()
self._should_skip()
def _should_skip(self):
if not self._running_on_redhat():
raise self.skipTest('This test runs only on red hat')
if not RootPermissions().is_root():
raise self.skipTest('This test must run with root permissions')
def test_sg3_utils(self):
self._check_package('sg3_utils', '/usr/bin/sg_inq')
def _check_package(self, package_name, executable_name):
pkgmgr = RedHatPackageManager()
is_installed_before = self._is_package_seems_to_be_installed(
package_name, executable_name)
self.assertEqual(pkgmgr.is_package_installed(package_name),
is_installed_before)
pkgmgr.install_package(package_name
) if not is_installed_before else pkgmgr.remove_package(
package_name)
self.assertNotEqual(pkgmgr.is_package_installed(package_name),
is_installed_before)
def _is_package_seems_to_be_installed(self, package_name, executable_name):
from os.path import exists
return exists(executable_name)
class Output(object):
def __init__(self, returncode=0, stdout='', stderr=''):
super(Output, self).__init__()
self._returncode = returncode
self._stdout = stdout
self._stderr = stderr
def get_stdout(self):
return self._stdout
def get_stderr(self):
return self._stderr
def get_returncode(self):
return self._returncode
def wait(self, timeout=None):
pass
class TestUbuntuMock(TestOnUbuntu):
def _should_skip(self):
pass
def _dpkg_query_s(self):
from textwrap import dedent
if self._installed:
return Output(stdout=dedent(
"""
Package: sg3-utils
Status: installed ok installed
Priority: optional
Version: 1.30-1
Section: admin
"""
).encode('ascii'))
else:
return Output(stdout=dedent(
"""
dpkg-query: package sg3-utils is not installed and no information is available
Use dpkg --info (= dpkg-deb --info) to examine archive files,
and dpkg --contents (= dpkg-deb --contents) to list their contents.
"""
).encode('ascii'), returncode=1)
def _dpkg_query_l(self):
from textwrap import dedent
return Output(stdout=dedent(
"""
Desired=Unknown/Install/Remove/Purge/Hold
| Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend
|/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)
||/ Name Version Architecture Description
+++-===========================-==================-==================-===========================================================
{} sg3-utils 1.30-1 i386 utilities for devices using the SCSI command set
"""
.format('ii' if self._installed else 'un')).encode('ascii'))
def _apt_get_install(self):
self._installed = True
return Output()
def _apt_get_update(self):
return Output()
@contextmanager
def _apply_patches(self):
with patch('infi.execute.execute') as execute:
def side_effect(*args, **kwargs):
command = args[0]
if 'dpkg-query' in command:
if '-s' in command:
return self._dpkg_query_s()
if '-l' in command:
return self._dpkg_query_l()
elif 'apt-get install' in ' '.join(command):
return self._apt_get_install()
elif 'apt-get update' in ' '.join(command):
return self._apt_get_update()
raise NotImplementedError()
execute.side_effect = side_effect
yield
def test_sg3_utils(self):
with self._apply_patches():
super(TestUbuntuMock, self).test_sg3_utils()
def test_check_unknown_package(self):
with self._apply_patches():
super(TestUbuntuMock, self).test_check_unknown_package()
def setUp(self):
self._installed = False
def _is_package_seems_to_be_installed(self, package_name, executable_name):
return self._installed
class TestRedHatMock(TestOnRedHat):
def _should_skip(self):
pass
def _rpm_query(self):
return Output(stdout=b'sg3_utils-1.25-5.el5' if self._installed else
b'package sg3_utils is not installed', returncode=0 if self.
_installed else 1)
def _yum_install(self):
self._installed = True
return Output()
@contextmanager
def _apply_patches(self):
with patch('infi.execute.execute') as execute:
def side_effect(*args, **kwargs):
command = args[0]
if '-q' in command:
return self._rpm_query()
elif 'install' in command:
return self._yum_install()
raise NotImplementedError()
execute.side_effect = side_effect
yield
def test_sg3_utils(self):
with self._apply_patches():
super(TestRedHatMock, self).test_sg3_utils()
pass
def setUp(self):
self._installed = False
def _is_package_seems_to_be_installed(self, package_name, executable_name):
return self._installed
class test_package_versioning(unittest.TestCase):
Solaris_v1 = b' VERSION: 6.0.100.000,REV=08.01.2012.09.00'
Solaris_v2 = b' VERSION: 5.14.2.5'
Ubuntu_v1 = b'Version: 0.4.9-3ubuntu7.2'
Ubuntu_v2 = b'Version: 1:1.2.8.dfsg-1ubuntu1'
rpm_v1 = b'4.8-7.el7'
rpm_v2 = b'18.168.6.1-34.el7'
def test_solaris_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Solaris_v1
patched().get_returncode.return_value = 0
result = SolarisPackageManager().get_installed_version(self.
Solaris_v1)
self.assertEqual(result, {'version': '6.0.100.000', 'revision':
'08.01.2012.09.00'})
def test_solaris_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Solaris_v2
patched().get_returncode.return_value = 0
result = SolarisPackageManager().get_installed_version(self.
Solaris_v2)
self.assertEqual(result, {'version': '5.14.2.5'})
def test_ubuntu_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Ubuntu_v1
patched().get_returncode.return_value = 0
result = UbuntuPackageManager().get_installed_version(self.
Ubuntu_v1)
self.assertEqual(result, {'version': '0.4.9-3ubuntu7.2'})
def test_ubuntu_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Ubuntu_v2
patched().get_returncode.return_value = 0
result = UbuntuPackageManager().get_installed_version(self.
Ubuntu_v2)
self.assertEqual(result, {'version': '1:1.2.8.dfsg-1ubuntu1'})
def test_rpm_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.rpm_v1
patched().get_returncode.return_value = 0
result = RpmMixin().get_installed_version(self.rpm_v1)
self.assertEqual(result, {'version': '4.8-7.el7'})
def test_rpm_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.rpm_v2
patched().get_returncode.return_value = 0
result = RpmMixin().get_installed_version(self.rpm_v2)
self.assertEqual(result, {'version': '18.168.6.1-34.el7'})
class GeneralTest(unittest.TestCase):
def _is_solaris(self):
from infi.os_info import get_platform_string
return get_platform_string().split('-')[0] == 'solaris'
def test_get_package_manager(self):
package_manager = pkgmgr.get_package_manager()
package_to_check = 'python'
if self._is_solaris():
package_to_check = 'CSW' + package_to_check
self.assertTrue(package_manager.is_package_installed(package_to_check))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestOnUbuntu(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _should_skip(self):
if not self._running_on_ubuntu():
raise self.skipTest('This test runs only on ubuntu')
if not RootPermissions().is_root():
raise self.skipTest('This test must run with root permissions')
<|reserved_special_token_0|>
def _check_package(self, package_name, executable_name):
pkgmgr = UbuntuPackageManager()
is_installed_before = self._is_package_seems_to_be_installed(
package_name, executable_name)
self.assertEqual(pkgmgr.is_package_installed(package_name),
is_installed_before)
pkgmgr.install_package(package_name
) if not is_installed_before else pkgmgr.remove_package(
package_name)
self.assertNotEqual(pkgmgr.is_package_installed(package_name),
is_installed_before)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestOnRedHat(unittest.TestCase):
def _running_on_redhat(self):
return distro.id() == 'rhel'
def setUp(self):
super(TestOnRedHat, self).setUp()
self._should_skip()
def _should_skip(self):
if not self._running_on_redhat():
raise self.skipTest('This test runs only on red hat')
if not RootPermissions().is_root():
raise self.skipTest('This test must run with root permissions')
def test_sg3_utils(self):
self._check_package('sg3_utils', '/usr/bin/sg_inq')
def _check_package(self, package_name, executable_name):
pkgmgr = RedHatPackageManager()
is_installed_before = self._is_package_seems_to_be_installed(
package_name, executable_name)
self.assertEqual(pkgmgr.is_package_installed(package_name),
is_installed_before)
pkgmgr.install_package(package_name
) if not is_installed_before else pkgmgr.remove_package(
package_name)
self.assertNotEqual(pkgmgr.is_package_installed(package_name),
is_installed_before)
def _is_package_seems_to_be_installed(self, package_name, executable_name):
from os.path import exists
return exists(executable_name)
class Output(object):
def __init__(self, returncode=0, stdout='', stderr=''):
super(Output, self).__init__()
self._returncode = returncode
self._stdout = stdout
self._stderr = stderr
def get_stdout(self):
return self._stdout
def get_stderr(self):
return self._stderr
def get_returncode(self):
return self._returncode
def wait(self, timeout=None):
pass
class TestUbuntuMock(TestOnUbuntu):
def _should_skip(self):
pass
def _dpkg_query_s(self):
from textwrap import dedent
if self._installed:
return Output(stdout=dedent(
"""
Package: sg3-utils
Status: installed ok installed
Priority: optional
Version: 1.30-1
Section: admin
"""
).encode('ascii'))
else:
return Output(stdout=dedent(
"""
dpkg-query: package sg3-utils is not installed and no information is available
Use dpkg --info (= dpkg-deb --info) to examine archive files,
and dpkg --contents (= dpkg-deb --contents) to list their contents.
"""
).encode('ascii'), returncode=1)
def _dpkg_query_l(self):
from textwrap import dedent
return Output(stdout=dedent(
"""
Desired=Unknown/Install/Remove/Purge/Hold
| Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend
|/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)
||/ Name Version Architecture Description
+++-===========================-==================-==================-===========================================================
{} sg3-utils 1.30-1 i386 utilities for devices using the SCSI command set
"""
.format('ii' if self._installed else 'un')).encode('ascii'))
def _apt_get_install(self):
self._installed = True
return Output()
def _apt_get_update(self):
return Output()
@contextmanager
def _apply_patches(self):
with patch('infi.execute.execute') as execute:
def side_effect(*args, **kwargs):
command = args[0]
if 'dpkg-query' in command:
if '-s' in command:
return self._dpkg_query_s()
if '-l' in command:
return self._dpkg_query_l()
elif 'apt-get install' in ' '.join(command):
return self._apt_get_install()
elif 'apt-get update' in ' '.join(command):
return self._apt_get_update()
raise NotImplementedError()
execute.side_effect = side_effect
yield
def test_sg3_utils(self):
with self._apply_patches():
super(TestUbuntuMock, self).test_sg3_utils()
def test_check_unknown_package(self):
with self._apply_patches():
super(TestUbuntuMock, self).test_check_unknown_package()
def setUp(self):
self._installed = False
def _is_package_seems_to_be_installed(self, package_name, executable_name):
return self._installed
class TestRedHatMock(TestOnRedHat):
def _should_skip(self):
pass
def _rpm_query(self):
return Output(stdout=b'sg3_utils-1.25-5.el5' if self._installed else
b'package sg3_utils is not installed', returncode=0 if self.
_installed else 1)
def _yum_install(self):
self._installed = True
return Output()
@contextmanager
def _apply_patches(self):
with patch('infi.execute.execute') as execute:
def side_effect(*args, **kwargs):
command = args[0]
if '-q' in command:
return self._rpm_query()
elif 'install' in command:
return self._yum_install()
raise NotImplementedError()
execute.side_effect = side_effect
yield
def test_sg3_utils(self):
with self._apply_patches():
super(TestRedHatMock, self).test_sg3_utils()
pass
def setUp(self):
self._installed = False
def _is_package_seems_to_be_installed(self, package_name, executable_name):
return self._installed
class test_package_versioning(unittest.TestCase):
Solaris_v1 = b' VERSION: 6.0.100.000,REV=08.01.2012.09.00'
Solaris_v2 = b' VERSION: 5.14.2.5'
Ubuntu_v1 = b'Version: 0.4.9-3ubuntu7.2'
Ubuntu_v2 = b'Version: 1:1.2.8.dfsg-1ubuntu1'
rpm_v1 = b'4.8-7.el7'
rpm_v2 = b'18.168.6.1-34.el7'
def test_solaris_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Solaris_v1
patched().get_returncode.return_value = 0
result = SolarisPackageManager().get_installed_version(self.
Solaris_v1)
self.assertEqual(result, {'version': '6.0.100.000', 'revision':
'08.01.2012.09.00'})
def test_solaris_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Solaris_v2
patched().get_returncode.return_value = 0
result = SolarisPackageManager().get_installed_version(self.
Solaris_v2)
self.assertEqual(result, {'version': '5.14.2.5'})
def test_ubuntu_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Ubuntu_v1
patched().get_returncode.return_value = 0
result = UbuntuPackageManager().get_installed_version(self.
Ubuntu_v1)
self.assertEqual(result, {'version': '0.4.9-3ubuntu7.2'})
def test_ubuntu_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Ubuntu_v2
patched().get_returncode.return_value = 0
result = UbuntuPackageManager().get_installed_version(self.
Ubuntu_v2)
self.assertEqual(result, {'version': '1:1.2.8.dfsg-1ubuntu1'})
def test_rpm_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.rpm_v1
patched().get_returncode.return_value = 0
result = RpmMixin().get_installed_version(self.rpm_v1)
self.assertEqual(result, {'version': '4.8-7.el7'})
def test_rpm_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.rpm_v2
patched().get_returncode.return_value = 0
result = RpmMixin().get_installed_version(self.rpm_v2)
self.assertEqual(result, {'version': '18.168.6.1-34.el7'})
class GeneralTest(unittest.TestCase):
def _is_solaris(self):
from infi.os_info import get_platform_string
return get_platform_string().split('-')[0] == 'solaris'
def test_get_package_manager(self):
package_manager = pkgmgr.get_package_manager()
package_to_check = 'python'
if self._is_solaris():
package_to_check = 'CSW' + package_to_check
self.assertTrue(package_manager.is_package_installed(package_to_check))
<|reserved_special_token_1|>
from . import UbuntuPackageManager, RedHatPackageManager, SolarisPackageManager, RpmMixin
from infi import unittest
from infi.run_as_root import RootPermissions
from contextlib import contextmanager
from infi import pkgmgr
from mock import patch
import distro
# pylint: disable-all
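# Integration suites first: TestOnUbuntu / TestOnRedHat drive the real package
# managers, so each skips itself unless it runs on the matching distro as root.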
class TestOnUbuntu(unittest.TestCase):
def _running_on_ubuntu(self):
return distro.id() == "ubuntu"
def setUp(self):
super(TestOnUbuntu, self).setUp()
self._should_skip()
def _should_skip(self):
if not self._running_on_ubuntu():
raise self.skipTest("This test runs only on ubuntu")
if not RootPermissions().is_root():
raise self.skipTest("This test must run with root permissions")
def test_sg3_utils(self):
from infi.execute import execute
execute('apt-get update'.split())
self._check_package("sg3-utils", "/usr/bin/sg_inq")
def _check_package(self, package_name, executable_name):
pkgmgr = UbuntuPackageManager()
is_installed_before = self._is_package_seems_to_be_installed(package_name, executable_name)
self.assertEqual(pkgmgr.is_package_installed(package_name), is_installed_before)
# Do the opposite
pkgmgr.install_package(package_name) if not is_installed_before else pkgmgr.remove_package(package_name)
self.assertNotEqual(pkgmgr.is_package_installed(package_name), is_installed_before)
def _is_package_seems_to_be_installed(self, package_name, executable_name):
from os.path import exists
return exists(executable_name)
def test_check_unknown_package(self):
pkgmgr = UbuntuPackageManager()
self.assertFalse(pkgmgr.is_package_installed('blablabla9988ok'))
class TestOnRedHat(unittest.TestCase):
def _running_on_redhat(self):
return distro.id() == "rhel"
def setUp(self):
super(TestOnRedHat, self).setUp()
self._should_skip()
def _should_skip(self):
if not self._running_on_redhat():
raise self.skipTest("This test runs only on red hat")
if not RootPermissions().is_root():
raise self.skipTest("This test must run with root permissions")
def test_sg3_utils(self):
self._check_package("sg3_utils", "/usr/bin/sg_inq")
def _check_package(self, package_name, executable_name):
pkgmgr = RedHatPackageManager()
is_installed_before = self._is_package_seems_to_be_installed(package_name, executable_name)
self.assertEqual(pkgmgr.is_package_installed(package_name), is_installed_before)
# Do the opposite
pkgmgr.install_package(package_name) if not is_installed_before else pkgmgr.remove_package(package_name)
self.assertNotEqual(pkgmgr.is_package_installed(package_name), is_installed_before)
def _is_package_seems_to_be_installed(self, package_name, executable_name):
from os.path import exists
return exists(executable_name)
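# Minimal stand-in for the result object returned by infi.execute.execute();
# the mocked suites below hand instances of it back from the patched execute.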
class Output(object):
def __init__(self, returncode=0, stdout='', stderr=''):
super(Output, self).__init__()
self._returncode = returncode
self._stdout = stdout
self._stderr = stderr
def get_stdout(self):
return self._stdout
def get_stderr(self):
return self._stderr
def get_returncode(self):
return self._returncode
def wait(self, timeout=None):
pass
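# Mocked suites: re-run the same test cases against canned dpkg-query/apt-get
# output, so they pass on any platform without root or real package changes.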
class TestUbuntuMock(TestOnUbuntu):
def _should_skip(self):
pass
def _dpkg_query_s(self):
from textwrap import dedent
if self._installed:
return Output(stdout=dedent("""
Package: sg3-utils
Status: installed ok installed
Priority: optional
Version: 1.30-1
Section: admin
""").encode("ascii"))
else:
return Output(stdout=dedent("""
dpkg-query: package sg3-utils is not installed and no information is available
Use dpkg --info (= dpkg-deb --info) to examine archive files,
and dpkg --contents (= dpkg-deb --contents) to list their contents.
""").encode("ascii"), returncode=1)
def _dpkg_query_l(self):
from textwrap import dedent
return Output(stdout=dedent("""
Desired=Unknown/Install/Remove/Purge/Hold
| Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend
|/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)
||/ Name Version Architecture Description
+++-===========================-==================-==================-===========================================================
{} sg3-utils 1.30-1 i386 utilities for devices using the SCSI command set
""".format("ii" if self._installed else "un")).encode("ascii"))
def _apt_get_install(self):
self._installed = True
return Output()
def _apt_get_update(self):
return Output()
@contextmanager
def _apply_patches(self):
with patch("infi.execute.execute") as execute:
def side_effect(*args, **kwargs):
command = args[0]
if "dpkg-query" in command:
if "-s" in command:
return self._dpkg_query_s()
if "-l" in command:
return self._dpkg_query_l()
elif "apt-get install" in ' '.join(command):
return self._apt_get_install()
elif "apt-get update" in ' '.join(command):
return self._apt_get_update()
raise NotImplementedError()
execute.side_effect = side_effect
yield
def test_sg3_utils(self):
with self._apply_patches():
super(TestUbuntuMock, self).test_sg3_utils()
def test_check_unknown_package(self):
with self._apply_patches():
super(TestUbuntuMock, self).test_check_unknown_package()
def setUp(self):
self._installed = False
def _is_package_seems_to_be_installed(self, package_name, executable_name):
return self._installed
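# Same approach for Red Hat: the patched execute fakes 'rpm -q' and 'yum install'.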
class TestRedHatMock(TestOnRedHat):
def _should_skip(self):
pass
def _rpm_query(self):
return Output(stdout=b'sg3_utils-1.25-5.el5' if self._installed else b'package sg3_utils is not installed',
returncode=0 if self._installed else 1)
def _yum_install(self):
self._installed = True
return Output()
@contextmanager
def _apply_patches(self):
with patch("infi.execute.execute") as execute:
def side_effect(*args, **kwargs):
command = args[0]
if "-q" in command:
return self._rpm_query()
elif "install" in command:
return self._yum_install()
raise NotImplementedError()
execute.side_effect = side_effect
yield
def test_sg3_utils(self):
with self._apply_patches():
super(TestRedHatMock, self).test_sg3_utils()
pass
def setUp(self):
self._installed = False
def _is_package_seems_to_be_installed(self, package_name, executable_name):
return self._installed
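# Version parsing fixtures: captured manager output (Solaris pkginfo-style VERSION
# fields, dpkg 'Version:' lines, rpm version-release strings).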
class test_package_versioning(unittest.TestCase):
Solaris_v1 = b""" VERSION: 6.0.100.000,REV=08.01.2012.09.00"""
Solaris_v2 = b""" VERSION: 5.14.2.5"""
Ubuntu_v1 = b"""Version: 0.4.9-3ubuntu7.2"""
Ubuntu_v2 = b"""Version: 1:1.2.8.dfsg-1ubuntu1"""
rpm_v1 = b"""4.8-7.el7"""
rpm_v2 = b"""18.168.6.1-34.el7"""
def test_solaris_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Solaris_v1
patched().get_returncode.return_value = 0
result = SolarisPackageManager().get_installed_version(self.Solaris_v1)
self.assertEqual(result, {'version': '6.0.100.000', 'revision': '08.01.2012.09.00'})
def test_solaris_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Solaris_v2
patched().get_returncode.return_value = 0
result = SolarisPackageManager().get_installed_version(self.Solaris_v2)
self.assertEqual(result, {'version': '5.14.2.5'})
def test_ubuntu_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Ubuntu_v1
patched().get_returncode.return_value = 0
result = UbuntuPackageManager().get_installed_version(self.Ubuntu_v1)
self.assertEqual(result, {'version': '0.4.9-3ubuntu7.2'})
def test_ubuntu_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.Ubuntu_v2
patched().get_returncode.return_value = 0
result = UbuntuPackageManager().get_installed_version(self.Ubuntu_v2)
self.assertEqual(result, {'version': '1:1.2.8.dfsg-1ubuntu1'})
def test_rpm_versioning_v1(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.rpm_v1
patched().get_returncode.return_value = 0
result = RpmMixin().get_installed_version(self.rpm_v1)
self.assertEqual(result, {'version': '4.8-7.el7'})
def test_rpm_versioning_v2(self):
with patch.object(pkgmgr, 'execute_command') as patched:
patched().get_stdout.return_value = self.rpm_v2
patched().get_returncode.return_value = 0
result = RpmMixin().get_installed_version(self.rpm_v2)
self.assertEqual(result, {'version': '18.168.6.1-34.el7'})
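# Platform-independent smoke test for the factory; on Solaris the OpenCSW
# package name carries the 'CSW' prefix.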
class GeneralTest(unittest.TestCase):
def _is_solaris(self):
from infi.os_info import get_platform_string
return get_platform_string().split('-')[0] == 'solaris'
def test_get_package_manager(self):
package_manager = pkgmgr.get_package_manager()
package_to_check = 'python'
if self._is_solaris():
package_to_check = 'CSW' + package_to_check
self.assertTrue(package_manager.is_package_installed(package_to_check))
|
flexible
|
{
"blob_id": "b3c1843a742a82bca61650ab89ea8afdf3c9010d",
"index": 6667,
"step-1": "<mask token>\n\n\nclass TestUbuntuMock(TestOnUbuntu):\n\n def _should_skip(self):\n pass\n\n def _dpkg_query_s(self):\n from textwrap import dedent\n if self._installed:\n return Output(stdout=dedent(\n \"\"\"\n Package: sg3-utils\n Status: installed ok installed\n Priority: optional\n Version: 1.30-1\n Section: admin\n \"\"\"\n ).encode('ascii'))\n else:\n return Output(stdout=dedent(\n \"\"\"\n dpkg-query: package sg3-utils is not installed and no information is available\n Use dpkg --info (= dpkg-deb --info) to examine archive files,\n and dpkg --contents (= dpkg-deb --contents) to list their contents.\n \"\"\"\n ).encode('ascii'), returncode=1)\n\n def _dpkg_query_l(self):\n from textwrap import dedent\n return Output(stdout=dedent(\n \"\"\"\n Desired=Unknown/Install/Remove/Purge/Hold\n | Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend\n |/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)\n ||/ Name Version Architecture Description\n +++-===========================-==================-==================-===========================================================\n {} sg3-utils 1.30-1 i386 utilities for devices using the SCSI command set\n \"\"\"\n .format('ii' if self._installed else 'un')).encode('ascii'))\n <mask token>\n\n def _apt_get_update(self):\n return Output()\n <mask token>\n <mask token>\n\n def test_check_unknown_package(self):\n with self._apply_patches():\n super(TestUbuntuMock, self).test_check_unknown_package()\n\n def setUp(self):\n self._installed = False\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n return self._installed\n\n\nclass TestRedHatMock(TestOnRedHat):\n\n def _should_skip(self):\n pass\n\n def _rpm_query(self):\n return Output(stdout=b'sg3_utils-1.25-5.el5' if self._installed else\n b'package sg3_utils is not installed', returncode=0 if self.\n _installed else 1)\n\n def _yum_install(self):\n self._installed = True\n return Output()\n\n @contextmanager\n def _apply_patches(self):\n with patch('infi.execute.execute') as execute:\n\n def side_effect(*args, **kwargs):\n command = args[0]\n if '-q' in command:\n return self._rpm_query()\n elif 'install' in command:\n return self._yum_install()\n raise NotImplementedError()\n execute.side_effect = side_effect\n yield\n\n def test_sg3_utils(self):\n with self._apply_patches():\n super(TestRedHatMock, self).test_sg3_utils()\n pass\n\n def setUp(self):\n self._installed = False\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n return self._installed\n\n\nclass test_package_versioning(unittest.TestCase):\n Solaris_v1 = b' VERSION: 6.0.100.000,REV=08.01.2012.09.00'\n Solaris_v2 = b' VERSION: 5.14.2.5'\n Ubuntu_v1 = b'Version: 0.4.9-3ubuntu7.2'\n Ubuntu_v2 = b'Version: 1:1.2.8.dfsg-1ubuntu1'\n rpm_v1 = b'4.8-7.el7'\n rpm_v2 = b'18.168.6.1-34.el7'\n\n def test_solaris_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Solaris_v1\n patched().get_returncode.return_value = 0\n result = SolarisPackageManager().get_installed_version(self.\n Solaris_v1)\n self.assertEqual(result, {'version': '6.0.100.000', 'revision':\n '08.01.2012.09.00'})\n\n def test_solaris_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Solaris_v2\n patched().get_returncode.return_value = 0\n result = SolarisPackageManager().get_installed_version(self.\n Solaris_v2)\n self.assertEqual(result, 
{'version': '5.14.2.5'})\n\n def test_ubuntu_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Ubuntu_v1\n patched().get_returncode.return_value = 0\n result = UbuntuPackageManager().get_installed_version(self.\n Ubuntu_v1)\n self.assertEqual(result, {'version': '0.4.9-3ubuntu7.2'})\n\n def test_ubuntu_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Ubuntu_v2\n patched().get_returncode.return_value = 0\n result = UbuntuPackageManager().get_installed_version(self.\n Ubuntu_v2)\n self.assertEqual(result, {'version': '1:1.2.8.dfsg-1ubuntu1'})\n\n def test_rpm_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.rpm_v1\n patched().get_returncode.return_value = 0\n result = RpmMixin().get_installed_version(self.rpm_v1)\n self.assertEqual(result, {'version': '4.8-7.el7'})\n\n def test_rpm_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.rpm_v2\n patched().get_returncode.return_value = 0\n result = RpmMixin().get_installed_version(self.rpm_v2)\n self.assertEqual(result, {'version': '18.168.6.1-34.el7'})\n\n\nclass GeneralTest(unittest.TestCase):\n\n def _is_solaris(self):\n from infi.os_info import get_platform_string\n return get_platform_string().split('-')[0] == 'solaris'\n\n def test_get_package_manager(self):\n package_manager = pkgmgr.get_package_manager()\n package_to_check = 'python'\n if self._is_solaris():\n package_to_check = 'CSW' + package_to_check\n self.assertTrue(package_manager.is_package_installed(package_to_check))\n",
"step-2": "<mask token>\n\n\nclass Output(object):\n\n def __init__(self, returncode=0, stdout='', stderr=''):\n super(Output, self).__init__()\n self._returncode = returncode\n self._stdout = stdout\n self._stderr = stderr\n\n def get_stdout(self):\n return self._stdout\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestUbuntuMock(TestOnUbuntu):\n\n def _should_skip(self):\n pass\n\n def _dpkg_query_s(self):\n from textwrap import dedent\n if self._installed:\n return Output(stdout=dedent(\n \"\"\"\n Package: sg3-utils\n Status: installed ok installed\n Priority: optional\n Version: 1.30-1\n Section: admin\n \"\"\"\n ).encode('ascii'))\n else:\n return Output(stdout=dedent(\n \"\"\"\n dpkg-query: package sg3-utils is not installed and no information is available\n Use dpkg --info (= dpkg-deb --info) to examine archive files,\n and dpkg --contents (= dpkg-deb --contents) to list their contents.\n \"\"\"\n ).encode('ascii'), returncode=1)\n\n def _dpkg_query_l(self):\n from textwrap import dedent\n return Output(stdout=dedent(\n \"\"\"\n Desired=Unknown/Install/Remove/Purge/Hold\n | Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend\n |/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)\n ||/ Name Version Architecture Description\n +++-===========================-==================-==================-===========================================================\n {} sg3-utils 1.30-1 i386 utilities for devices using the SCSI command set\n \"\"\"\n .format('ii' if self._installed else 'un')).encode('ascii'))\n\n def _apt_get_install(self):\n self._installed = True\n return Output()\n\n def _apt_get_update(self):\n return Output()\n\n @contextmanager\n def _apply_patches(self):\n with patch('infi.execute.execute') as execute:\n\n def side_effect(*args, **kwargs):\n command = args[0]\n if 'dpkg-query' in command:\n if '-s' in command:\n return self._dpkg_query_s()\n if '-l' in command:\n return self._dpkg_query_l()\n elif 'apt-get install' in ' '.join(command):\n return self._apt_get_install()\n elif 'apt-get update' in ' '.join(command):\n return self._apt_get_update()\n raise NotImplementedError()\n execute.side_effect = side_effect\n yield\n\n def test_sg3_utils(self):\n with self._apply_patches():\n super(TestUbuntuMock, self).test_sg3_utils()\n\n def test_check_unknown_package(self):\n with self._apply_patches():\n super(TestUbuntuMock, self).test_check_unknown_package()\n\n def setUp(self):\n self._installed = False\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n return self._installed\n\n\nclass TestRedHatMock(TestOnRedHat):\n\n def _should_skip(self):\n pass\n\n def _rpm_query(self):\n return Output(stdout=b'sg3_utils-1.25-5.el5' if self._installed else\n b'package sg3_utils is not installed', returncode=0 if self.\n _installed else 1)\n\n def _yum_install(self):\n self._installed = True\n return Output()\n\n @contextmanager\n def _apply_patches(self):\n with patch('infi.execute.execute') as execute:\n\n def side_effect(*args, **kwargs):\n command = args[0]\n if '-q' in command:\n return self._rpm_query()\n elif 'install' in command:\n return self._yum_install()\n raise NotImplementedError()\n execute.side_effect = side_effect\n yield\n\n def test_sg3_utils(self):\n with self._apply_patches():\n super(TestRedHatMock, self).test_sg3_utils()\n pass\n\n def setUp(self):\n self._installed = False\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n return self._installed\n\n\nclass 
test_package_versioning(unittest.TestCase):\n Solaris_v1 = b' VERSION: 6.0.100.000,REV=08.01.2012.09.00'\n Solaris_v2 = b' VERSION: 5.14.2.5'\n Ubuntu_v1 = b'Version: 0.4.9-3ubuntu7.2'\n Ubuntu_v2 = b'Version: 1:1.2.8.dfsg-1ubuntu1'\n rpm_v1 = b'4.8-7.el7'\n rpm_v2 = b'18.168.6.1-34.el7'\n\n def test_solaris_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Solaris_v1\n patched().get_returncode.return_value = 0\n result = SolarisPackageManager().get_installed_version(self.\n Solaris_v1)\n self.assertEqual(result, {'version': '6.0.100.000', 'revision':\n '08.01.2012.09.00'})\n\n def test_solaris_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Solaris_v2\n patched().get_returncode.return_value = 0\n result = SolarisPackageManager().get_installed_version(self.\n Solaris_v2)\n self.assertEqual(result, {'version': '5.14.2.5'})\n\n def test_ubuntu_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Ubuntu_v1\n patched().get_returncode.return_value = 0\n result = UbuntuPackageManager().get_installed_version(self.\n Ubuntu_v1)\n self.assertEqual(result, {'version': '0.4.9-3ubuntu7.2'})\n\n def test_ubuntu_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Ubuntu_v2\n patched().get_returncode.return_value = 0\n result = UbuntuPackageManager().get_installed_version(self.\n Ubuntu_v2)\n self.assertEqual(result, {'version': '1:1.2.8.dfsg-1ubuntu1'})\n\n def test_rpm_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.rpm_v1\n patched().get_returncode.return_value = 0\n result = RpmMixin().get_installed_version(self.rpm_v1)\n self.assertEqual(result, {'version': '4.8-7.el7'})\n\n def test_rpm_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.rpm_v2\n patched().get_returncode.return_value = 0\n result = RpmMixin().get_installed_version(self.rpm_v2)\n self.assertEqual(result, {'version': '18.168.6.1-34.el7'})\n\n\nclass GeneralTest(unittest.TestCase):\n\n def _is_solaris(self):\n from infi.os_info import get_platform_string\n return get_platform_string().split('-')[0] == 'solaris'\n\n def test_get_package_manager(self):\n package_manager = pkgmgr.get_package_manager()\n package_to_check = 'python'\n if self._is_solaris():\n package_to_check = 'CSW' + package_to_check\n self.assertTrue(package_manager.is_package_installed(package_to_check))\n",
"step-3": "<mask token>\n\n\nclass TestOnRedHat(unittest.TestCase):\n\n def _running_on_redhat(self):\n return distro.id() == 'rhel'\n\n def setUp(self):\n super(TestOnRedHat, self).setUp()\n self._should_skip()\n\n def _should_skip(self):\n if not self._running_on_redhat():\n raise self.skipTest('This test runs only on red hat')\n if not RootPermissions().is_root():\n raise self.skipTest('This test must run with root permissions')\n\n def test_sg3_utils(self):\n self._check_package('sg3_utils', '/usr/bin/sg_inq')\n\n def _check_package(self, package_name, executable_name):\n pkgmgr = RedHatPackageManager()\n is_installed_before = self._is_package_seems_to_be_installed(\n package_name, executable_name)\n self.assertEqual(pkgmgr.is_package_installed(package_name),\n is_installed_before)\n pkgmgr.install_package(package_name\n ) if not is_installed_before else pkgmgr.remove_package(\n package_name)\n self.assertNotEqual(pkgmgr.is_package_installed(package_name),\n is_installed_before)\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n from os.path import exists\n return exists(executable_name)\n\n\nclass Output(object):\n\n def __init__(self, returncode=0, stdout='', stderr=''):\n super(Output, self).__init__()\n self._returncode = returncode\n self._stdout = stdout\n self._stderr = stderr\n\n def get_stdout(self):\n return self._stdout\n\n def get_stderr(self):\n return self._stderr\n\n def get_returncode(self):\n return self._returncode\n\n def wait(self, timeout=None):\n pass\n\n\nclass TestUbuntuMock(TestOnUbuntu):\n\n def _should_skip(self):\n pass\n\n def _dpkg_query_s(self):\n from textwrap import dedent\n if self._installed:\n return Output(stdout=dedent(\n \"\"\"\n Package: sg3-utils\n Status: installed ok installed\n Priority: optional\n Version: 1.30-1\n Section: admin\n \"\"\"\n ).encode('ascii'))\n else:\n return Output(stdout=dedent(\n \"\"\"\n dpkg-query: package sg3-utils is not installed and no information is available\n Use dpkg --info (= dpkg-deb --info) to examine archive files,\n and dpkg --contents (= dpkg-deb --contents) to list their contents.\n \"\"\"\n ).encode('ascii'), returncode=1)\n\n def _dpkg_query_l(self):\n from textwrap import dedent\n return Output(stdout=dedent(\n \"\"\"\n Desired=Unknown/Install/Remove/Purge/Hold\n | Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend\n |/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)\n ||/ Name Version Architecture Description\n +++-===========================-==================-==================-===========================================================\n {} sg3-utils 1.30-1 i386 utilities for devices using the SCSI command set\n \"\"\"\n .format('ii' if self._installed else 'un')).encode('ascii'))\n\n def _apt_get_install(self):\n self._installed = True\n return Output()\n\n def _apt_get_update(self):\n return Output()\n\n @contextmanager\n def _apply_patches(self):\n with patch('infi.execute.execute') as execute:\n\n def side_effect(*args, **kwargs):\n command = args[0]\n if 'dpkg-query' in command:\n if '-s' in command:\n return self._dpkg_query_s()\n if '-l' in command:\n return self._dpkg_query_l()\n elif 'apt-get install' in ' '.join(command):\n return self._apt_get_install()\n elif 'apt-get update' in ' '.join(command):\n return self._apt_get_update()\n raise NotImplementedError()\n execute.side_effect = side_effect\n yield\n\n def test_sg3_utils(self):\n with self._apply_patches():\n super(TestUbuntuMock, self).test_sg3_utils()\n\n def 
test_check_unknown_package(self):\n with self._apply_patches():\n super(TestUbuntuMock, self).test_check_unknown_package()\n\n def setUp(self):\n self._installed = False\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n return self._installed\n\n\nclass TestRedHatMock(TestOnRedHat):\n\n def _should_skip(self):\n pass\n\n def _rpm_query(self):\n return Output(stdout=b'sg3_utils-1.25-5.el5' if self._installed else\n b'package sg3_utils is not installed', returncode=0 if self.\n _installed else 1)\n\n def _yum_install(self):\n self._installed = True\n return Output()\n\n @contextmanager\n def _apply_patches(self):\n with patch('infi.execute.execute') as execute:\n\n def side_effect(*args, **kwargs):\n command = args[0]\n if '-q' in command:\n return self._rpm_query()\n elif 'install' in command:\n return self._yum_install()\n raise NotImplementedError()\n execute.side_effect = side_effect\n yield\n\n def test_sg3_utils(self):\n with self._apply_patches():\n super(TestRedHatMock, self).test_sg3_utils()\n pass\n\n def setUp(self):\n self._installed = False\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n return self._installed\n\n\nclass test_package_versioning(unittest.TestCase):\n Solaris_v1 = b' VERSION: 6.0.100.000,REV=08.01.2012.09.00'\n Solaris_v2 = b' VERSION: 5.14.2.5'\n Ubuntu_v1 = b'Version: 0.4.9-3ubuntu7.2'\n Ubuntu_v2 = b'Version: 1:1.2.8.dfsg-1ubuntu1'\n rpm_v1 = b'4.8-7.el7'\n rpm_v2 = b'18.168.6.1-34.el7'\n\n def test_solaris_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Solaris_v1\n patched().get_returncode.return_value = 0\n result = SolarisPackageManager().get_installed_version(self.\n Solaris_v1)\n self.assertEqual(result, {'version': '6.0.100.000', 'revision':\n '08.01.2012.09.00'})\n\n def test_solaris_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Solaris_v2\n patched().get_returncode.return_value = 0\n result = SolarisPackageManager().get_installed_version(self.\n Solaris_v2)\n self.assertEqual(result, {'version': '5.14.2.5'})\n\n def test_ubuntu_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Ubuntu_v1\n patched().get_returncode.return_value = 0\n result = UbuntuPackageManager().get_installed_version(self.\n Ubuntu_v1)\n self.assertEqual(result, {'version': '0.4.9-3ubuntu7.2'})\n\n def test_ubuntu_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Ubuntu_v2\n patched().get_returncode.return_value = 0\n result = UbuntuPackageManager().get_installed_version(self.\n Ubuntu_v2)\n self.assertEqual(result, {'version': '1:1.2.8.dfsg-1ubuntu1'})\n\n def test_rpm_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.rpm_v1\n patched().get_returncode.return_value = 0\n result = RpmMixin().get_installed_version(self.rpm_v1)\n self.assertEqual(result, {'version': '4.8-7.el7'})\n\n def test_rpm_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.rpm_v2\n patched().get_returncode.return_value = 0\n result = RpmMixin().get_installed_version(self.rpm_v2)\n self.assertEqual(result, {'version': '18.168.6.1-34.el7'})\n\n\nclass GeneralTest(unittest.TestCase):\n\n def _is_solaris(self):\n from 
infi.os_info import get_platform_string\n return get_platform_string().split('-')[0] == 'solaris'\n\n def test_get_package_manager(self):\n package_manager = pkgmgr.get_package_manager()\n package_to_check = 'python'\n if self._is_solaris():\n package_to_check = 'CSW' + package_to_check\n self.assertTrue(package_manager.is_package_installed(package_to_check))\n",
"step-4": "<mask token>\n\n\nclass TestOnUbuntu(unittest.TestCase):\n <mask token>\n <mask token>\n\n def _should_skip(self):\n if not self._running_on_ubuntu():\n raise self.skipTest('This test runs only on ubuntu')\n if not RootPermissions().is_root():\n raise self.skipTest('This test must run with root permissions')\n <mask token>\n\n def _check_package(self, package_name, executable_name):\n pkgmgr = UbuntuPackageManager()\n is_installed_before = self._is_package_seems_to_be_installed(\n package_name, executable_name)\n self.assertEqual(pkgmgr.is_package_installed(package_name),\n is_installed_before)\n pkgmgr.install_package(package_name\n ) if not is_installed_before else pkgmgr.remove_package(\n package_name)\n self.assertNotEqual(pkgmgr.is_package_installed(package_name),\n is_installed_before)\n <mask token>\n <mask token>\n\n\nclass TestOnRedHat(unittest.TestCase):\n\n def _running_on_redhat(self):\n return distro.id() == 'rhel'\n\n def setUp(self):\n super(TestOnRedHat, self).setUp()\n self._should_skip()\n\n def _should_skip(self):\n if not self._running_on_redhat():\n raise self.skipTest('This test runs only on red hat')\n if not RootPermissions().is_root():\n raise self.skipTest('This test must run with root permissions')\n\n def test_sg3_utils(self):\n self._check_package('sg3_utils', '/usr/bin/sg_inq')\n\n def _check_package(self, package_name, executable_name):\n pkgmgr = RedHatPackageManager()\n is_installed_before = self._is_package_seems_to_be_installed(\n package_name, executable_name)\n self.assertEqual(pkgmgr.is_package_installed(package_name),\n is_installed_before)\n pkgmgr.install_package(package_name\n ) if not is_installed_before else pkgmgr.remove_package(\n package_name)\n self.assertNotEqual(pkgmgr.is_package_installed(package_name),\n is_installed_before)\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n from os.path import exists\n return exists(executable_name)\n\n\nclass Output(object):\n\n def __init__(self, returncode=0, stdout='', stderr=''):\n super(Output, self).__init__()\n self._returncode = returncode\n self._stdout = stdout\n self._stderr = stderr\n\n def get_stdout(self):\n return self._stdout\n\n def get_stderr(self):\n return self._stderr\n\n def get_returncode(self):\n return self._returncode\n\n def wait(self, timeout=None):\n pass\n\n\nclass TestUbuntuMock(TestOnUbuntu):\n\n def _should_skip(self):\n pass\n\n def _dpkg_query_s(self):\n from textwrap import dedent\n if self._installed:\n return Output(stdout=dedent(\n \"\"\"\n Package: sg3-utils\n Status: installed ok installed\n Priority: optional\n Version: 1.30-1\n Section: admin\n \"\"\"\n ).encode('ascii'))\n else:\n return Output(stdout=dedent(\n \"\"\"\n dpkg-query: package sg3-utils is not installed and no information is available\n Use dpkg --info (= dpkg-deb --info) to examine archive files,\n and dpkg --contents (= dpkg-deb --contents) to list their contents.\n \"\"\"\n ).encode('ascii'), returncode=1)\n\n def _dpkg_query_l(self):\n from textwrap import dedent\n return Output(stdout=dedent(\n \"\"\"\n Desired=Unknown/Install/Remove/Purge/Hold\n | Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend\n |/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)\n ||/ Name Version Architecture Description\n +++-===========================-==================-==================-===========================================================\n {} sg3-utils 1.30-1 i386 utilities for devices using the SCSI command set\n \"\"\"\n 
.format('ii' if self._installed else 'un')).encode('ascii'))\n\n def _apt_get_install(self):\n self._installed = True\n return Output()\n\n def _apt_get_update(self):\n return Output()\n\n @contextmanager\n def _apply_patches(self):\n with patch('infi.execute.execute') as execute:\n\n def side_effect(*args, **kwargs):\n command = args[0]\n if 'dpkg-query' in command:\n if '-s' in command:\n return self._dpkg_query_s()\n if '-l' in command:\n return self._dpkg_query_l()\n elif 'apt-get install' in ' '.join(command):\n return self._apt_get_install()\n elif 'apt-get update' in ' '.join(command):\n return self._apt_get_update()\n raise NotImplementedError()\n execute.side_effect = side_effect\n yield\n\n def test_sg3_utils(self):\n with self._apply_patches():\n super(TestUbuntuMock, self).test_sg3_utils()\n\n def test_check_unknown_package(self):\n with self._apply_patches():\n super(TestUbuntuMock, self).test_check_unknown_package()\n\n def setUp(self):\n self._installed = False\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n return self._installed\n\n\nclass TestRedHatMock(TestOnRedHat):\n\n def _should_skip(self):\n pass\n\n def _rpm_query(self):\n return Output(stdout=b'sg3_utils-1.25-5.el5' if self._installed else\n b'package sg3_utils is not installed', returncode=0 if self.\n _installed else 1)\n\n def _yum_install(self):\n self._installed = True\n return Output()\n\n @contextmanager\n def _apply_patches(self):\n with patch('infi.execute.execute') as execute:\n\n def side_effect(*args, **kwargs):\n command = args[0]\n if '-q' in command:\n return self._rpm_query()\n elif 'install' in command:\n return self._yum_install()\n raise NotImplementedError()\n execute.side_effect = side_effect\n yield\n\n def test_sg3_utils(self):\n with self._apply_patches():\n super(TestRedHatMock, self).test_sg3_utils()\n pass\n\n def setUp(self):\n self._installed = False\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n return self._installed\n\n\nclass test_package_versioning(unittest.TestCase):\n Solaris_v1 = b' VERSION: 6.0.100.000,REV=08.01.2012.09.00'\n Solaris_v2 = b' VERSION: 5.14.2.5'\n Ubuntu_v1 = b'Version: 0.4.9-3ubuntu7.2'\n Ubuntu_v2 = b'Version: 1:1.2.8.dfsg-1ubuntu1'\n rpm_v1 = b'4.8-7.el7'\n rpm_v2 = b'18.168.6.1-34.el7'\n\n def test_solaris_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Solaris_v1\n patched().get_returncode.return_value = 0\n result = SolarisPackageManager().get_installed_version(self.\n Solaris_v1)\n self.assertEqual(result, {'version': '6.0.100.000', 'revision':\n '08.01.2012.09.00'})\n\n def test_solaris_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Solaris_v2\n patched().get_returncode.return_value = 0\n result = SolarisPackageManager().get_installed_version(self.\n Solaris_v2)\n self.assertEqual(result, {'version': '5.14.2.5'})\n\n def test_ubuntu_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Ubuntu_v1\n patched().get_returncode.return_value = 0\n result = UbuntuPackageManager().get_installed_version(self.\n Ubuntu_v1)\n self.assertEqual(result, {'version': '0.4.9-3ubuntu7.2'})\n\n def test_ubuntu_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Ubuntu_v2\n patched().get_returncode.return_value = 0\n 
result = UbuntuPackageManager().get_installed_version(self.\n Ubuntu_v2)\n self.assertEqual(result, {'version': '1:1.2.8.dfsg-1ubuntu1'})\n\n def test_rpm_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.rpm_v1\n patched().get_returncode.return_value = 0\n result = RpmMixin().get_installed_version(self.rpm_v1)\n self.assertEqual(result, {'version': '4.8-7.el7'})\n\n def test_rpm_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.rpm_v2\n patched().get_returncode.return_value = 0\n result = RpmMixin().get_installed_version(self.rpm_v2)\n self.assertEqual(result, {'version': '18.168.6.1-34.el7'})\n\n\nclass GeneralTest(unittest.TestCase):\n\n def _is_solaris(self):\n from infi.os_info import get_platform_string\n return get_platform_string().split('-')[0] == 'solaris'\n\n def test_get_package_manager(self):\n package_manager = pkgmgr.get_package_manager()\n package_to_check = 'python'\n if self._is_solaris():\n package_to_check = 'CSW' + package_to_check\n self.assertTrue(package_manager.is_package_installed(package_to_check))\n",
"step-5": "from . import UbuntuPackageManager, RedHatPackageManager, SolarisPackageManager, RpmMixin\nfrom infi import unittest\n\nfrom infi.run_as_root import RootPermissions\nfrom contextlib import contextmanager\n\nfrom infi import pkgmgr\nfrom mock import patch\nimport distro\n# pylint: disable-all\n\n\nclass TestOnUbuntu(unittest.TestCase):\n def _running_on_ubuntu(self):\n return distro.id() == \"ubuntu\"\n\n def setUp(self):\n super(TestOnUbuntu, self).setUp()\n self._should_skip()\n\n def _should_skip(self):\n if not self._running_on_ubuntu():\n raise self.skipTest(\"This test runs only on ubuntu\")\n if not RootPermissions().is_root():\n raise self.skipTest(\"This test must run with root permissions\")\n\n def test_sg3_utils(self):\n from infi.execute import execute\n execute('apt-get update'.split())\n self._check_package(\"sg3-utils\", \"/usr/bin/sg_inq\")\n\n def _check_package(self, package_name, executable_name):\n pkgmgr = UbuntuPackageManager()\n is_installed_before = self._is_package_seems_to_be_installed(package_name, executable_name)\n self.assertEqual(pkgmgr.is_package_installed(package_name), is_installed_before)\n # Do the opposite\n pkgmgr.install_package(package_name) if not is_installed_before else pkgmgr.remove_package(package_name)\n self.assertNotEqual(pkgmgr.is_package_installed(package_name), is_installed_before)\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n from os.path import exists\n return exists(executable_name)\n\n def test_check_unknown_package(self):\n pkgmgr = UbuntuPackageManager()\n self.assertFalse(pkgmgr.is_package_installed('blablabla9988ok'))\n\n\nclass TestOnRedHat(unittest.TestCase):\n def _running_on_redhat(self):\n return distro.id() == \"rhel\"\n\n def setUp(self):\n super(TestOnRedHat, self).setUp()\n self._should_skip()\n\n def _should_skip(self):\n if not self._running_on_redhat():\n raise self.skipTest(\"This test runs only on red hat\")\n if not RootPermissions().is_root():\n raise self.skipTest(\"This test must run with root permissions\")\n\n def test_sg3_utils(self):\n self._check_package(\"sg3_utils\", \"/usr/bin/sg_inq\")\n\n def _check_package(self, package_name, executable_name):\n pkgmgr = RedHatPackageManager()\n is_installed_before = self._is_package_seems_to_be_installed(package_name, executable_name)\n self.assertEqual(pkgmgr.is_package_installed(package_name), is_installed_before)\n # Do the opposite\n pkgmgr.install_package(package_name) if not is_installed_before else pkgmgr.remove_package(package_name)\n self.assertNotEqual(pkgmgr.is_package_installed(package_name), is_installed_before)\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n from os.path import exists\n return exists(executable_name)\n\nclass Output(object):\n def __init__(self, returncode=0, stdout='', stderr=''):\n super(Output, self).__init__()\n self._returncode = returncode\n self._stdout = stdout\n self._stderr = stderr\n\n def get_stdout(self):\n return self._stdout\n\n def get_stderr(self):\n return self._stderr\n\n def get_returncode(self):\n return self._returncode\n\n def wait(self, timeout=None):\n pass\n\nclass TestUbuntuMock(TestOnUbuntu):\n def _should_skip(self):\n pass\n\n def _dpkg_query_s(self):\n from textwrap import dedent\n if self._installed:\n return Output(stdout=dedent(\"\"\"\n Package: sg3-utils\n Status: installed ok installed\n Priority: optional\n Version: 1.30-1\n Section: admin\n \"\"\").encode(\"ascii\"))\n else:\n return Output(stdout=dedent(\"\"\"\n 
dpkg-query: package sg3-utils is not installed and no information is available\n Use dpkg --info (= dpkg-deb --info) to examine archive files,\n and dpkg --contents (= dpkg-deb --contents) to list their contents.\n \"\"\").encode(\"ascii\"), returncode=1)\n\n def _dpkg_query_l(self):\n from textwrap import dedent\n return Output(stdout=dedent(\"\"\"\n Desired=Unknown/Install/Remove/Purge/Hold\n | Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend\n |/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)\n ||/ Name Version Architecture Description\n +++-===========================-==================-==================-===========================================================\n {} sg3-utils 1.30-1 i386 utilities for devices using the SCSI command set\n \"\"\".format(\"ii\" if self._installed else \"un\")).encode(\"ascii\"))\n\n def _apt_get_install(self):\n self._installed = True\n return Output()\n\n def _apt_get_update(self):\n return Output()\n\n @contextmanager\n def _apply_patches(self):\n with patch(\"infi.execute.execute\") as execute:\n def side_effect(*args, **kwargs):\n command = args[0]\n if \"dpkg-query\" in command:\n if \"-s\" in command:\n return self._dpkg_query_s()\n if \"-l\" in command:\n return self._dpkg_query_l()\n elif \"apt-get install\" in ' '.join(command):\n return self._apt_get_install()\n elif \"apt-get update\" in ' '.join(command):\n return self._apt_get_update()\n raise NotImplementedError()\n execute.side_effect = side_effect\n yield\n\n def test_sg3_utils(self):\n with self._apply_patches():\n super(TestUbuntuMock, self).test_sg3_utils()\n\n def test_check_unknown_package(self):\n with self._apply_patches():\n super(TestUbuntuMock, self).test_check_unknown_package()\n\n def setUp(self):\n self._installed = False\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n return self._installed\n\nclass TestRedHatMock(TestOnRedHat):\n def _should_skip(self):\n pass\n\n def _rpm_query(self):\n return Output(stdout=b'sg3_utils-1.25-5.el5' if self._installed else b'package sg3_utils is not installed',\n returncode=0 if self._installed else 1)\n\n def _yum_install(self):\n self._installed = True\n return Output()\n\n @contextmanager\n def _apply_patches(self):\n with patch(\"infi.execute.execute\") as execute:\n def side_effect(*args, **kwargs):\n command = args[0]\n if \"-q\" in command:\n return self._rpm_query()\n elif \"install\" in command:\n return self._yum_install()\n raise NotImplementedError()\n execute.side_effect = side_effect\n yield\n\n def test_sg3_utils(self):\n with self._apply_patches():\n super(TestRedHatMock, self).test_sg3_utils()\n pass\n\n def setUp(self):\n self._installed = False\n\n def _is_package_seems_to_be_installed(self, package_name, executable_name):\n return self._installed\n\nclass test_package_versioning(unittest.TestCase):\n\n Solaris_v1 = b\"\"\" VERSION: 6.0.100.000,REV=08.01.2012.09.00\"\"\"\n Solaris_v2 = b\"\"\" VERSION: 5.14.2.5\"\"\"\n Ubuntu_v1 = b\"\"\"Version: 0.4.9-3ubuntu7.2\"\"\"\n Ubuntu_v2 = b\"\"\"Version: 1:1.2.8.dfsg-1ubuntu1\"\"\"\n rpm_v1 = b\"\"\"4.8-7.el7\"\"\"\n rpm_v2 = b\"\"\"18.168.6.1-34.el7\"\"\"\n def test_solaris_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Solaris_v1\n patched().get_returncode.return_value = 0\n result = SolarisPackageManager().get_installed_version(self.Solaris_v1)\n self.assertEqual(result, {'version': '6.0.100.000', 'revision': 
'08.01.2012.09.00'})\n\n def test_solaris_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Solaris_v2\n patched().get_returncode.return_value = 0\n result = SolarisPackageManager().get_installed_version(self.Solaris_v2)\n self.assertEqual(result, {'version': '5.14.2.5'})\n\n def test_ubuntu_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Ubuntu_v1\n patched().get_returncode.return_value = 0\n result = UbuntuPackageManager().get_installed_version(self.Ubuntu_v1)\n self.assertEqual(result, {'version': '0.4.9-3ubuntu7.2'})\n\n def test_ubuntu_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.Ubuntu_v2\n patched().get_returncode.return_value = 0\n result = UbuntuPackageManager().get_installed_version(self.Ubuntu_v2)\n self.assertEqual(result, {'version': '1:1.2.8.dfsg-1ubuntu1'})\n\n def test_rpm_versioning_v1(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.rpm_v1\n patched().get_returncode.return_value = 0\n result = RpmMixin().get_installed_version(self.rpm_v1)\n self.assertEqual(result, {'version': '4.8-7.el7'})\n\n def test_rpm_versioning_v2(self):\n with patch.object(pkgmgr, 'execute_command') as patched:\n patched().get_stdout.return_value = self.rpm_v2\n patched().get_returncode.return_value = 0\n result = RpmMixin().get_installed_version(self.rpm_v2)\n self.assertEqual(result, {'version': '18.168.6.1-34.el7'})\n\nclass GeneralTest(unittest.TestCase):\n def _is_solaris(self):\n from infi.os_info import get_platform_string\n return get_platform_string().split('-')[0] == 'solaris'\n\n def test_get_package_manager(self):\n package_manager = pkgmgr.get_package_manager()\n package_to_check = 'python'\n if self._is_solaris():\n package_to_check = 'CSW' + package_to_check\n self.assertTrue(package_manager.is_package_installed(package_to_check))\n",
"step-ids": [
27,
33,
43,
46,
53
]
}
|
[
27,
33,
43,
46,
53
] |
from django.apps import AppConfig
class Sharem8Config(AppConfig):
name = 'ShareM8'
|
normal
|
{
"blob_id": "fd4d785d933c3a200f4aba094ecfe1e1c76737a5",
"index": 7629,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Sharem8Config(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Sharem8Config(AppConfig):\n name = 'ShareM8'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass Sharem8Config(AppConfig):\n name = 'ShareM8'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#
# tests/middleware/test_static.py
#
import pytest
import growler
from pathlib import Path
from unittest import mock
from sys import version_info
from growler.middleware.static import Static
@pytest.fixture
def static(tmpdir):
return Static(str(tmpdir))
def test_static_fixture(static, tmpdir):
assert isinstance(static, Static)
assert str(static.path) == str(tmpdir)
def test_construct_with_list(tmpdir):
s = Static(['/'] + str(tmpdir).split('/'))
assert str(s.path) == str(tmpdir)
def test_error_on_missing_dir():
err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError
with pytest.raises(err):
Static("/does/not/exist")
def test_static_construct_requires_directory(tmpdir):
name = "foo"
foo = tmpdir / name
foo.write('')
with pytest.raises(NotADirectoryError):
Static(str(foo))
def test_call(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
file_contents = b'This is some text in teh file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
static(req, res)
res.set_type.assert_called_with('text/plain')
res.send_file.assert_called_with(file_path)
def test_call_invalid_path(static):
req, res = mock.Mock(), mock.Mock()
req.path = '/foo/../bar'
static(req, res)
assert not res.set_type.called
assert not res.send_file.called
assert not res.end.called
def test_call_with_etag(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
file_contents = b'This is some text in teh file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
req.headers = {'IF-NONE-MATCH': etag}
static(req, res)
assert res.status_code == 304
assert not res.set_type.called
assert not res.send_file.called
|
flexible
|
{
"blob_id": "9a7994a1e51c9cf7fe7d8b50ab26fa3d789fc8e5",
"index": 1012,
"step-1": "<mask token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\n<mask token>\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static('/does/not/exist')\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\ndef test_construct_with_list(tmpdir):\n s = Static(['/'] + str(tmpdir).split('/'))\n assert str(s.path) == str(tmpdir)\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static('/does/not/exist')\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n static(req, res)\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\n<mask token>\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n req.headers = {'IF-NONE-MATCH': etag}\n static(req, res)\n assert res.status_code == 304\n assert not res.set_type.called\n assert not res.send_file.called\n",
"step-3": "<mask token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\ndef test_construct_with_list(tmpdir):\n s = Static(['/'] + str(tmpdir).split('/'))\n assert str(s.path) == str(tmpdir)\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static('/does/not/exist')\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n static(req, res)\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n req.path = '/foo/../bar'\n static(req, res)\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n req.headers = {'IF-NONE-MATCH': etag}\n static(req, res)\n assert res.status_code == 304\n assert not res.set_type.called\n assert not res.send_file.called\n",
"step-4": "import pytest\nimport growler\nfrom pathlib import Path\nfrom unittest import mock\nfrom sys import version_info\nfrom growler.middleware.static import Static\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\ndef test_construct_with_list(tmpdir):\n s = Static(['/'] + str(tmpdir).split('/'))\n assert str(s.path) == str(tmpdir)\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static('/does/not/exist')\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n static(req, res)\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n req.path = '/foo/../bar'\n static(req, res)\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n req.headers = {'IF-NONE-MATCH': etag}\n static(req, res)\n assert res.status_code == 304\n assert not res.set_type.called\n assert not res.send_file.called\n",
"step-5": "#\n# tests/middleware/test_static.py\n#\n\nimport pytest\nimport growler\nfrom pathlib import Path\nfrom unittest import mock\nfrom sys import version_info\nfrom growler.middleware.static import Static\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\ndef test_construct_with_list(tmpdir):\n s = Static(['/'] + str(tmpdir).split('/'))\n assert str(s.path) == str(tmpdir)\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static(\"/does/not/exist\")\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = \"foo\"\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n\n file_contents = b'This is some text in teh file'\n\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n\n file_path = Path(str(f))\n\n etag = static.calculate_etag(file_path)\n\n req.path = '/foo/bar/file.txt'\n\n static(req, res)\n\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n\n req.path = '/foo/../bar'\n static(req, res)\n\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n\n file_contents = b'This is some text in teh file'\n\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n\n etag = static.calculate_etag(file_path)\n\n req.path = '/foo/bar/file.txt'\n\n req.headers = {'IF-NONE-MATCH': etag}\n\n static(req, res)\n\n assert res.status_code == 304\n\n assert not res.set_type.called\n assert not res.send_file.called\n",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
from arcgis.geocoding import geocode
from arcgis.gis import GIS
import pandas as pd
import Point_v1
"""
This module is used to get the location information of different companies from arcgis API.
"""
def crawl(file):
gis = GIS()
map = gis.map("United States")
map
# read all kinds of job files
job_df = pd.read_csv(Point_v1.CONSULTING_FILE).append(
pd.read_csv(Point_v1.DS_FILE)).append(
pd.read_csv(Point_v1.SDE_FILE))
company_loc_df = pd.DataFrame()
company_loc_df["company"] = job_df["company"].unique()
geo_info = company_loc_df["company"].apply(lambda company: geocode(company)[0] if geocode(company) else None)
company_loc_df['longitude'] = geo_info.apply(lambda info: info["location"]["x"] if info else None)
company_loc_df['latitude'] = geo_info.apply(lambda info: info["location"]["y"] if info else None)
company_loc_df['city'] = geo_info.apply(lambda info: info['attributes']['City'] if info else None)
company_loc_df['state'] = geo_info.apply(lambda info: info['attributes']['RegionAbbr'] if info else None)
company_loc_df.to_csv(file, encoding='utf-8', index=False)
|
normal
|
{
"blob_id": "902159d9ad3a1e36b69142518007b5d4bcaef0f3",
"index": 1320,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef crawl(file):\n gis = GIS()\n map = gis.map('United States')\n map\n job_df = pd.read_csv(Point_v1.CONSULTING_FILE).append(pd.read_csv(\n Point_v1.DS_FILE)).append(pd.read_csv(Point_v1.SDE_FILE))\n company_loc_df = pd.DataFrame()\n company_loc_df['company'] = job_df['company'].unique()\n geo_info = company_loc_df['company'].apply(lambda company: geocode(\n company)[0] if geocode(company) else None)\n company_loc_df['longitude'] = geo_info.apply(lambda info: info[\n 'location']['x'] if info else None)\n company_loc_df['latitude'] = geo_info.apply(lambda info: info[\n 'location']['y'] if info else None)\n company_loc_df['city'] = geo_info.apply(lambda info: info['attributes']\n ['City'] if info else None)\n company_loc_df['state'] = geo_info.apply(lambda info: info['attributes'\n ]['RegionAbbr'] if info else None)\n company_loc_df.to_csv(file, encoding='utf-8', index=False)\n",
"step-3": "from arcgis.geocoding import geocode\nfrom arcgis.gis import GIS\nimport pandas as pd\nimport Point_v1\n<mask token>\n\n\ndef crawl(file):\n gis = GIS()\n map = gis.map('United States')\n map\n job_df = pd.read_csv(Point_v1.CONSULTING_FILE).append(pd.read_csv(\n Point_v1.DS_FILE)).append(pd.read_csv(Point_v1.SDE_FILE))\n company_loc_df = pd.DataFrame()\n company_loc_df['company'] = job_df['company'].unique()\n geo_info = company_loc_df['company'].apply(lambda company: geocode(\n company)[0] if geocode(company) else None)\n company_loc_df['longitude'] = geo_info.apply(lambda info: info[\n 'location']['x'] if info else None)\n company_loc_df['latitude'] = geo_info.apply(lambda info: info[\n 'location']['y'] if info else None)\n company_loc_df['city'] = geo_info.apply(lambda info: info['attributes']\n ['City'] if info else None)\n company_loc_df['state'] = geo_info.apply(lambda info: info['attributes'\n ]['RegionAbbr'] if info else None)\n company_loc_df.to_csv(file, encoding='utf-8', index=False)\n",
"step-4": "from arcgis.geocoding import geocode\nfrom arcgis.gis import GIS\nimport pandas as pd\nimport Point_v1\n\n\"\"\"\nThis module is used to get the location information of different companies from arcgis API.\n\"\"\"\n\n\ndef crawl(file):\n gis = GIS()\n map = gis.map(\"United States\")\n map\n\n # read all kinds of job files\n job_df = pd.read_csv(Point_v1.CONSULTING_FILE).append(\n pd.read_csv(Point_v1.DS_FILE)).append(\n pd.read_csv(Point_v1.SDE_FILE))\n\n company_loc_df = pd.DataFrame()\n company_loc_df[\"company\"] = job_df[\"company\"].unique()\n geo_info = company_loc_df[\"company\"].apply(lambda company: geocode(company)[0] if geocode(company) else None)\n\n company_loc_df['longitude'] = geo_info.apply(lambda info: info[\"location\"][\"x\"] if info else None)\n company_loc_df['latitude'] = geo_info.apply(lambda info: info[\"location\"][\"y\"] if info else None)\n company_loc_df['city'] = geo_info.apply(lambda info: info['attributes']['City'] if info else None)\n company_loc_df['state'] = geo_info.apply(lambda info: info['attributes']['RegionAbbr'] if info else None)\n\n company_loc_df.to_csv(file, encoding='utf-8', index=False)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import cv2 as cv
import random
import time
random.seed(0)
def displayImage(winName, img):
""" Helper function to display image
arguments:
winName -- Name of display window
img -- Source Image
"""
cv.imshow(winName, img)
cv.waitKey(0)
##############################################
# Task 1 ##########################
##############################################
def task_1_a():
print("Task 1 (a) ...")
img = cv.imread('../images/shapes.png')
gray_image = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
edges = cv.Canny( gray_image,50,150)
#cv.imshow('edges', edges)
detected_lines = cv.HoughLines(edges,1,np.pi/180,10)
#print (detected_lines)
    for line in detected_lines:
        rho, theta = line[0]  # cv.HoughLines returns an (N, 1, 2) array; iterating detected_lines[0] would draw only the first line
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv.line(img,(x1,y1),(x2,y2),(0,255,0),1)
displayImage('1_a Hough transform - detected lines ', img)
def myHoughLines(img_edges, d_resolution, theta_step_sz, threshold):
"""
Your implementation of HoughLines
:param img_edges: single-channel binary source image (e.g: edges)
:param d_resolution: the resolution for the distance parameter
:param theta_step_sz: the resolution for the angle parameter
:param threshold: minimum number of votes to consider a detection
:return: list of detected lines as (d, theta) pairs and the accumulator
"""
accumulator = np.zeros((int(180 / theta_step_sz), int(np.linalg.norm(img_edges.shape) / d_resolution)))
detected_lines = []
rho = int(np.linalg.norm(img_edges.shape) / d_resolution)
#print (rho)
theta = int(180 / theta_step_sz)
theta_array = np.deg2rad(np.arange(-90, 90, theta_step_sz))
#print (theta)
width, height = img_edges.shape
img_edges_copy = img_edges.copy()
detected_lines = []
for x in range(width):
for y in range(height):
if img_edges_copy[x,y]:
for index_theta in range(len(theta_array)):
#theta_value = theta * index_theta
rho_value = x*np.cos(theta_array[index_theta]) + y*np.sin(theta_array[index_theta])
# to avoid negative index
index_rho = int (rho_value + rho/2)
                        # skip votes that fall outside the accumulator (negative
                        # indices would otherwise wrap around in numpy)
                        if index_rho < 0 or index_rho >= rho:
                            continue
#print('rhoindex')
#print (index_rho)
accumulator[index_theta, index_rho] += 1
if accumulator[index_theta, index_rho] >= threshold:
detected_lines.append((theta_array[index_theta], rho_value))
return detected_lines, accumulator
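
# Worked example of the voting rule above (illustrative values, not from the
# source): an edge pixel at (x, y) = (3, 4) with theta = 0 votes for
# rho_value = 3*cos(0) + 4*sin(0) = 3, which lands in column int(3 + rho/2)
# of the theta = 0 row after the shift that keeps indices non-negative; every
# pixel on the same line votes for the same (theta, rho) bin, which is why
# accumulator peaks correspond to lines.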
def task_1_b():
print("Task 1 (b) ...")
img = cv.imread('../images/shapes.png')
img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # convert the image into grayscale
edges = cv.Canny( img_gray,50,150) # detect the edges
detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)
cv.imshow("1_b Accumulator myHoughLines", accumulator)
#print (len(detected_lines))
for theta,rho in detected_lines:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv.line(img,(x1,y1),(x2,y2),(0,0,255),2)
displayImage('1_b Hough transform - own implementation', img)
##############################################
# Task 2 ##########################
##############################################
def task_2():
print("Task 2 ...")
img = cv.imread('../images/line.png')
img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # convert the image into grayscale
edges = cv.Canny( img_gray,50,150,apertureSize = 3) # detect the edges
theta_res = 1 # set the resolution of theta
d_res = 1 # set the distance resolution
_, accumulator = myHoughLines(edges, d_res, theta_res, 50)
displayImage("task_2_ accumulator - mean shift", accumulator)
#mean_shift(accumulator)
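
# The call above is commented out because no mean_shift implementation was
# provided. Below is a minimal sketch (an assumption, not part of the original
# assignment code) of mean-shift peak seeking on the accumulator: random seeds
# move to the weighted mean of the votes inside a square window until they
# settle near local maxima. Window size, seed count and iteration count are
# illustrative choices.
def mean_shift(accumulator, n_seeds=20, window=11, n_iter=30):
    h, w = accumulator.shape
    half = window // 2
    seeds = np.column_stack((np.random.randint(0, h, n_seeds),
                             np.random.randint(0, w, n_seeds))).astype(float)
    for _ in range(n_iter):
        for i, (r, c) in enumerate(seeds):
            r0, r1 = int(max(r - half, 0)), int(min(r + half + 1, h))
            c0, c1 = int(max(c - half, 0)), int(min(c + half + 1, w))
            patch = accumulator[r0:r1, c0:c1]
            total = patch.sum()
            if total == 0:
                continue  # empty window: leave the seed where it is
            rows, cols = np.mgrid[r0:r1, c0:c1]
            seeds[i] = ((rows * patch).sum() / total,
                        (cols * patch).sum() / total)
    # converged seeds approximate accumulator peaks, i.e. (theta, rho) bins of lines
    return np.unique(seeds.round().astype(int), axis=0)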
##############################################
# Task 3 ##########################
##############################################
def myKmeans(data, k, useDist = False):
"""
    :return: per-point array of cluster indices, and the data indices of the k cluster centers
"""
centers = np.zeros((k, 1), dtype = int)
index = np.zeros(data.shape[0], dtype=int)
clusters = [[] for i in range(k)]
threshold = 0
if data.shape[1] > 1:
threshold = 20
print('Threshold value = ' + str(threshold))
print('-------------------------------------------------')
# initialize centers using some random points from data
# ....
# Randomly initialize centers with pixel difference of greater than 0
for idx in range(centers.shape[0]):
randIdx = random.choice(range(data.shape[0]))
centers[idx] = randIdx
    # Randomly initialize centers with different pixel values. Still buggy
# start_time = time.time()
# indices = np.arange(0,data.shape[0]).tolist()
# for idx in range(centers.shape[0]):
# if len(indices) > 0:
# randIdx = random.choice(indices)
# delIndices = np.unique(np.where((data*255).astype('uint8') == (data[randIdx]*255).astype('uint8'))).tolist()
# if len(delIndices) > 0:
# for i in range(len(delIndices)):
# try:
# indices.remove(delIndices[i])
# except ValueError:
# print('Value not found')
# # print('Indices removed')
# else:
# randIdx = random.choice(range(data.shape[0]))
# centers[idx] = randIdx
# end_time = time.time()
# print('Center no' + str(idx+1) + ' added in ' + str(round(end_time - start_time,5)) + ' seconds')
# To debug uncomment the following lines
# Sometimes the pixel values of two cluster centroids are too close
# Therefore, one of the clusters might end up not having any points at all
# print('Initial centers:\n' + str(centers))
# print('-------------------------------------------------')
# centerVals = data[centers]
# print('Pixel Values of initial centers:\n' + str(centerVals))
# print('-------------------------------------------------')
convergence = False
iterationNo = 0
start_time = time.time()
    while not convergence:
        # assign each point to the cluster of the closest center
        clusters = [[] for _ in range(k)]  # reset assignments every iteration
        euclDist = 0
        centerVals = data[centers]
        for idx in range(data.shape[0]):
            if useDist:
                # data is flattened, so spatial distance is approximated by the
                # difference of flattened indices, normalized to [0, 1]
                euclDist = (centers - idx) / data.shape[0]
            # squared feature distance per center, summed over channels so the
            # color case compares whole pixels rather than single channels
            featCost = np.sum(np.square(data[idx] - centerVals.reshape(k, -1)), axis=1)
            distCost = np.square(euclDist).reshape(-1) if useDist else 0
            cost = featCost + distCost
            index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])
            clusters[index[idx]].append(idx)
        # update cluster centers and check for convergence
        convCounter = 0
        for idx in range(centers.shape[0]):
            if len(clusters[idx]) > 0:
                if data.shape[1] == 1:
                    meanVal = np.mean(data[clusters[idx]])
                elif data.shape[1] == 3:
                    meanVal = np.mean(data[clusters[idx]], axis=0)
                diff = (np.abs(centerVals[idx] - meanVal) * 255).astype('uint8')
                if np.sum(diff) > threshold:
                    indices = np.unique(np.where((data * 255).astype('uint8') == (meanVal * 255).astype('uint8'))[0])
if indices.size > 0:
centers[idx] = np.random.choice(indices)
else:
# if no pixel with the mean value is found, choose another pixel in the cluster
# and continue
centers[idx] = np.random.choice(clusters[idx])
else:
convCounter += 1
else:
convCounter += 1
if convCounter == k:
convergence = True
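        # converged once every one of the k centers stayed put this iteration
        # (empty clusters also count as unchanged)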
iterationNo += 1
print('iterationNo = ', iterationNo)
print('-------------------------------------------------')
end_time = time.time()
print('Data Clustered for K = ' + str(k) + ' in ' + str(round(end_time - start_time, 5)) + ' seconds')
print('-------------------------------------------------')
return index, centers


def task_3_a():
print("Task 3 (a) ...")
print('-------------------------------------------------')
img = cv.imread('../images/flower.png')
grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')
grayImg /= 255
cv.imshow('Intensity Image', grayImg)
K = [2, 4, 6]
for k in K:
print('K = ' + str(k))
print('-------------------------------------------------')
grayVec = np.reshape(grayImg.copy(), (-1,1))
index, centers = myKmeans(grayVec, k)
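        # paint every pixel with the value of its cluster's center pixel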
for kVal in range(k):
indices = np.where(index == kVal)[0]
grayVec[indices] = grayVec[centers[kVal]]
cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.reshape(grayImg.shape))
cv.waitKey(0)
print('=================================================')


def task_3_b():
print("Task 3 (b) ...")
print('-------------------------------------------------')
img = cv.imread('../images/flower.png')
imgFloat = img.copy().astype('float64')
imgFloat /= 255
cv.imshow('Color Image', imgFloat)
K = [2, 4, 6]
for k in K:
print('K = ' + str(k))
print('-------------------------------------------------')
imgVec = np.reshape(imgFloat.copy(), (-1,3))
index, centers = myKmeans(imgVec, k)
for kVal in range(k):
indices = np.where(index == kVal)[0]
imgVec[indices] = imgVec[centers[kVal]]
cv.imshow('Segmented Color Image for k = ' + str(k), imgVec.reshape(imgFloat.shape))
cv.waitKey(0)
print('=================================================')


def task_3_c():
print("Task 3 (c) ...")
print('-------------------------------------------------')
img = cv.imread('../images/flower.png')
grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')
grayImg /= 255
cv.imshow('Intensity Image', grayImg)
K = [2, 4, 6]
for k in K:
print('K = ' + str(k))
print('-------------------------------------------------')
grayVec = np.reshape(grayImg.copy(), (-1,1))
        index, centers = myKmeans(grayVec, k, useDist=True)
for kVal in range(k):
indices = np.where(index == kVal)[0]
grayVec[indices] = grayVec[centers[kVal]]
cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' + str(k), grayVec.reshape(grayImg.shape))
cv.waitKey(0)
print('=================================================')


##############################################
#     Task 4        ##########################
##############################################


def task_4_a():
print("Task 4 (a) ...")
print('-------------------------------------------------')
    D = np.zeros((8, 8))
W = np.array((
[0, 1, 0.2, 1, 0, 0, 0, 0], # A
[1, 0, 0.1, 0, 1, 0, 0, 0], # B
[0.2, 0.1, 0, 1, 0, 1, 0.3, 0], # C
[1, 0, 1, 0, 0, 1, 0, 0], # D
[0, 1, 0, 0, 0, 0, 1, 1], # E
[0, 0, 1, 1, 0, 0, 1, 0], # F
[0, 0, 0.3, 0, 1, 1, 0, 1], # G
[0, 0, 0, 0, 1, 0, 1, 0] # H
)) # construct the W matrix
    for i in range(W.shape[0]):
        D[i, i] = np.sum(W[i, :])  # construct the diagonal degree matrix D
    invSqrtD = np.linalg.inv(np.sqrt(D))
    L = D - W
    # symmetric normalized Laplacian: D^(-1/2) (D - W) D^(-1/2)
    op = np.matmul(np.matmul(invSqrtD, L), invSqrtD)
    # cv.eigen sorts eigenvalues in descending order, so the eigenvector of the
    # second-smallest eigenvalue (the Fiedler vector) is the second-to-last row
    _, _, eigenVecs = cv.eigen(op)
    secMinEigenVec = eigenVecs[eigenVecs.shape[1] - 2, :]
    C1 = 0
    C2 = 0
    # split the vertices by the sign of the Fiedler vector and accumulate the
    # volume vol(C) = sum of vertex degrees on each side
    for i in range(secMinEigenVec.shape[0]):
        if secMinEigenVec[i] < 0:
            C1 += D[i, i]
        else:
            C2 += D[i, i]
print('Eigen Vec: ' + str(np.round(secMinEigenVec, 3)))
    # Ncut(C1, C2) = cut(C1, C2) * (1/vol(C1) + 1/vol(C2)); the cut weight of
    # 2.4 is read off from the partition figure in the report (pdf)
    minNormCut = (1 / C1 + 1 / C2) * 2.4
print('Min Norm Cut = ' + str(minNormCut))
print('=================================================')


##############################################
##############################################
##############################################


# task_1_a()
# task_1_b()
# task_2()
# task_3_a()
# cv.destroyAllWindows()
# task_3_b()
# cv.destroyAllWindows()
# task_3_c()
# cv.destroyAllWindows()
task_4_a()
|
normal
|
{
"blob_id": "f7886f8d98ad0519f4635064f768f25dad101a3d",
"index": 2612,
"step-1": "<mask token>\n\n\ndef displayImage(winName, img):\n \"\"\" Helper function to display image\n arguments:\n winName -- Name of display window\n img -- Source Image\n \"\"\"\n cv.imshow(winName, img)\n cv.waitKey(0)\n\n\ndef task_1_a():\n print('Task 1 (a) ...')\n img = cv.imread('../images/shapes.png')\n gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(gray_image, 50, 150)\n detected_lines = cv.HoughLines(edges, 1, np.pi / 180, 10)\n for rho, theta in detected_lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 1)\n displayImage('1_a Hough transform - detected lines ', img)\n\n\n<mask token>\n\n\ndef task_1_b():\n print('Task 1 (b) ...')\n img = cv.imread('../images/shapes.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150)\n detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)\n cv.imshow('1_b Accumulator myHoughLines', accumulator)\n for theta, rho in detected_lines:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n displayImage('1_b Hough transform - own implementation', img)\n\n\ndef task_2():\n print('Task 2 ...')\n img = cv.imread('../images/line.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150, apertureSize=3)\n theta_res = 1\n d_res = 1\n _, accumulator = myHoughLines(edges, d_res, theta_res, 50)\n displayImage('task_2_ accumulator - mean shift', accumulator)\n\n\ndef myKmeans(data, k, useDist=False):\n \"\"\"\n :return: centers and list of indices that store the cluster index for each data point\n \"\"\"\n centers = np.zeros((k, 1), dtype=int)\n index = np.zeros(data.shape[0], dtype=int)\n clusters = [[] for i in range(k)]\n threshold = 0\n if data.shape[1] > 1:\n threshold = 20\n print('Threshold value = ' + str(threshold))\n print('-------------------------------------------------')\n for idx in range(centers.shape[0]):\n randIdx = random.choice(range(data.shape[0]))\n centers[idx] = randIdx\n convergence = False\n iterationNo = 0\n start_time = time.time()\n while not convergence:\n euclDist = 0\n centerVals = data[centers]\n for idx in range(data.shape[0]):\n if useDist:\n euclDist = (centers - idx) / data.shape[0]\n cost = np.square(data[idx] - centerVals) + np.square(euclDist)\n index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])\n clusters[index[idx]].append(idx)\n convCounter = 0\n for idx in range(centers.shape[0]):\n if len(clusters[idx]) > 0:\n if data.shape[1] == 1:\n meanVal = np.mean(data[clusters[idx]])\n elif data.shape[1] == 3:\n meanVal = np.mean(data[clusters[idx]], axis=0)\n diff = (np.abs(centerVals[idx] - meanVal) * 255).astype('uint8'\n )\n if np.sum(diff) > threshold:\n indices = np.unique(np.where((data * 255).astype(\n 'uint8') == (meanVal * 255).astype('uint8'))[0])\n if indices.size > 0:\n centers[idx] = np.random.choice(indices)\n else:\n centers[idx] = np.random.choice(clusters[idx])\n else:\n convCounter += 1\n else:\n convCounter += 1\n if convCounter == k:\n convergence = True\n iterationNo += 1\n print('iterationNo = ', iterationNo)\n print('-------------------------------------------------')\n end_time = time.time()\n print('Data Clustered for K = ' + str(k) + ' 
in ' + str(round(end_time -\n start_time, 5)) + ' seconds')\n print('-------------------------------------------------')\n return index, centers\n\n\ndef task_3_a():\n print('Task 3 (a) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.\n reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\n<mask token>\n\n\ndef task_3_c():\n print('Task 3 (c) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k, useDist=True)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' +\n str(k), grayVec.reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef displayImage(winName, img):\n \"\"\" Helper function to display image\n arguments:\n winName -- Name of display window\n img -- Source Image\n \"\"\"\n cv.imshow(winName, img)\n cv.waitKey(0)\n\n\ndef task_1_a():\n print('Task 1 (a) ...')\n img = cv.imread('../images/shapes.png')\n gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(gray_image, 50, 150)\n detected_lines = cv.HoughLines(edges, 1, np.pi / 180, 10)\n for rho, theta in detected_lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 1)\n displayImage('1_a Hough transform - detected lines ', img)\n\n\n<mask token>\n\n\ndef task_1_b():\n print('Task 1 (b) ...')\n img = cv.imread('../images/shapes.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150)\n detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)\n cv.imshow('1_b Accumulator myHoughLines', accumulator)\n for theta, rho in detected_lines:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n displayImage('1_b Hough transform - own implementation', img)\n\n\ndef task_2():\n print('Task 2 ...')\n img = cv.imread('../images/line.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150, apertureSize=3)\n theta_res = 1\n d_res = 1\n _, accumulator = myHoughLines(edges, d_res, theta_res, 50)\n displayImage('task_2_ accumulator - mean shift', accumulator)\n\n\ndef myKmeans(data, k, useDist=False):\n \"\"\"\n :return: centers and list of indices that store the cluster index for each data point\n \"\"\"\n centers = np.zeros((k, 1), dtype=int)\n index = np.zeros(data.shape[0], dtype=int)\n clusters = [[] for i in range(k)]\n threshold = 0\n if data.shape[1] > 1:\n threshold = 20\n print('Threshold value = ' + str(threshold))\n print('-------------------------------------------------')\n for idx in range(centers.shape[0]):\n randIdx = random.choice(range(data.shape[0]))\n centers[idx] = randIdx\n convergence = False\n iterationNo = 0\n start_time = time.time()\n while not convergence:\n euclDist = 0\n centerVals = data[centers]\n for idx in range(data.shape[0]):\n if useDist:\n euclDist = (centers - idx) / data.shape[0]\n cost = np.square(data[idx] - centerVals) + np.square(euclDist)\n index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])\n clusters[index[idx]].append(idx)\n convCounter = 0\n for idx in range(centers.shape[0]):\n if len(clusters[idx]) > 0:\n if data.shape[1] == 1:\n meanVal = np.mean(data[clusters[idx]])\n elif data.shape[1] == 3:\n meanVal = np.mean(data[clusters[idx]], axis=0)\n diff = (np.abs(centerVals[idx] - meanVal) * 255).astype('uint8'\n )\n if np.sum(diff) > threshold:\n indices = np.unique(np.where((data * 255).astype(\n 'uint8') == (meanVal * 255).astype('uint8'))[0])\n if indices.size > 0:\n centers[idx] = np.random.choice(indices)\n else:\n centers[idx] = np.random.choice(clusters[idx])\n else:\n convCounter += 1\n else:\n convCounter += 1\n if convCounter == k:\n convergence = True\n iterationNo += 1\n print('iterationNo = ', iterationNo)\n print('-------------------------------------------------')\n end_time = time.time()\n print('Data Clustered for K = ' + str(k) + ' 
in ' + str(round(end_time -\n start_time, 5)) + ' seconds')\n print('-------------------------------------------------')\n return index, centers\n\n\ndef task_3_a():\n print('Task 3 (a) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.\n reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_3_b():\n print('Task 3 (b) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n imgFloat = img.copy().astype('float64')\n imgFloat /= 255\n cv.imshow('Color Image', imgFloat)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n imgVec = np.reshape(imgFloat.copy(), (-1, 3))\n index, centers = myKmeans(imgVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n imgVec[indices] = imgVec[centers[kVal]]\n cv.imshow('Segmented Color Image for k = ' + str(k), imgVec.reshape\n (imgFloat.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_3_c():\n print('Task 3 (c) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k, useDist=True)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' +\n str(k), grayVec.reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef displayImage(winName, img):\n \"\"\" Helper function to display image\n arguments:\n winName -- Name of display window\n img -- Source Image\n \"\"\"\n cv.imshow(winName, img)\n cv.waitKey(0)\n\n\ndef task_1_a():\n print('Task 1 (a) ...')\n img = cv.imread('../images/shapes.png')\n gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(gray_image, 50, 150)\n detected_lines = cv.HoughLines(edges, 1, np.pi / 180, 10)\n for rho, theta in detected_lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 1)\n displayImage('1_a Hough transform - detected lines ', img)\n\n\ndef myHoughLines(img_edges, d_resolution, theta_step_sz, threshold):\n \"\"\"\n Your implementation of HoughLines\n :param img_edges: single-channel binary source image (e.g: edges)\n :param d_resolution: the resolution for the distance parameter\n :param theta_step_sz: the resolution for the angle parameter\n :param threshold: minimum number of votes to consider a detection\n :return: list of detected lines as (d, theta) pairs and the accumulator\n \"\"\"\n accumulator = np.zeros((int(180 / theta_step_sz), int(np.linalg.norm(\n img_edges.shape) / d_resolution)))\n detected_lines = []\n rho = int(np.linalg.norm(img_edges.shape) / d_resolution)\n theta = int(180 / theta_step_sz)\n theta_array = np.deg2rad(np.arange(-90, 90, theta_step_sz))\n width, height = img_edges.shape\n img_edges_copy = img_edges.copy()\n detected_lines = []\n for x in range(width):\n for y in range(height):\n if img_edges_copy[x, y]:\n for index_theta in range(len(theta_array)):\n rho_value = x * np.cos(theta_array[index_theta]\n ) + y * np.sin(theta_array[index_theta])\n index_rho = int(rho_value + rho / 2)\n if index_rho >= rho:\n continue\n accumulator[index_theta, index_rho] += 1\n if accumulator[index_theta, index_rho] >= threshold:\n detected_lines.append((theta_array[index_theta],\n rho_value))\n return detected_lines, accumulator\n\n\ndef task_1_b():\n print('Task 1 (b) ...')\n img = cv.imread('../images/shapes.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150)\n detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)\n cv.imshow('1_b Accumulator myHoughLines', accumulator)\n for theta, rho in detected_lines:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n displayImage('1_b Hough transform - own implementation', img)\n\n\ndef task_2():\n print('Task 2 ...')\n img = cv.imread('../images/line.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150, apertureSize=3)\n theta_res = 1\n d_res = 1\n _, accumulator = myHoughLines(edges, d_res, theta_res, 50)\n displayImage('task_2_ accumulator - mean shift', accumulator)\n\n\ndef myKmeans(data, k, useDist=False):\n \"\"\"\n :return: centers and list of indices that store the cluster index for each data point\n \"\"\"\n centers = np.zeros((k, 1), dtype=int)\n index = np.zeros(data.shape[0], dtype=int)\n clusters = [[] for i in range(k)]\n threshold = 0\n if data.shape[1] > 1:\n threshold = 20\n print('Threshold value = ' + str(threshold))\n print('-------------------------------------------------')\n for idx in 
range(centers.shape[0]):\n randIdx = random.choice(range(data.shape[0]))\n centers[idx] = randIdx\n convergence = False\n iterationNo = 0\n start_time = time.time()\n while not convergence:\n euclDist = 0\n centerVals = data[centers]\n for idx in range(data.shape[0]):\n if useDist:\n euclDist = (centers - idx) / data.shape[0]\n cost = np.square(data[idx] - centerVals) + np.square(euclDist)\n index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])\n clusters[index[idx]].append(idx)\n convCounter = 0\n for idx in range(centers.shape[0]):\n if len(clusters[idx]) > 0:\n if data.shape[1] == 1:\n meanVal = np.mean(data[clusters[idx]])\n elif data.shape[1] == 3:\n meanVal = np.mean(data[clusters[idx]], axis=0)\n diff = (np.abs(centerVals[idx] - meanVal) * 255).astype('uint8'\n )\n if np.sum(diff) > threshold:\n indices = np.unique(np.where((data * 255).astype(\n 'uint8') == (meanVal * 255).astype('uint8'))[0])\n if indices.size > 0:\n centers[idx] = np.random.choice(indices)\n else:\n centers[idx] = np.random.choice(clusters[idx])\n else:\n convCounter += 1\n else:\n convCounter += 1\n if convCounter == k:\n convergence = True\n iterationNo += 1\n print('iterationNo = ', iterationNo)\n print('-------------------------------------------------')\n end_time = time.time()\n print('Data Clustered for K = ' + str(k) + ' in ' + str(round(end_time -\n start_time, 5)) + ' seconds')\n print('-------------------------------------------------')\n return index, centers\n\n\ndef task_3_a():\n print('Task 3 (a) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.\n reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_3_b():\n print('Task 3 (b) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n imgFloat = img.copy().astype('float64')\n imgFloat /= 255\n cv.imshow('Color Image', imgFloat)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n imgVec = np.reshape(imgFloat.copy(), (-1, 3))\n index, centers = myKmeans(imgVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n imgVec[indices] = imgVec[centers[kVal]]\n cv.imshow('Segmented Color Image for k = ' + str(k), imgVec.reshape\n (imgFloat.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_3_c():\n print('Task 3 (c) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = 
myKmeans(grayVec, k, useDist=True)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' +\n str(k), grayVec.reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_4_a():\n print('Task 4 (a) ...')\n print('-------------------------------------------------')\n D = np.zeros((8, 8))\n W = np.array(([0, 1, 0.2, 1, 0, 0, 0, 0], [1, 0, 0.1, 0, 1, 0, 0, 0], [\n 0.2, 0.1, 0, 1, 0, 1, 0.3, 0], [1, 0, 1, 0, 0, 1, 0, 0], [0, 1, 0, \n 0, 0, 0, 1, 1], [0, 0, 1, 1, 0, 0, 1, 0], [0, 0, 0.3, 0, 1, 1, 0, 1\n ], [0, 0, 0, 0, 1, 0, 1, 0]))\n for i in range(W.shape[0]):\n D[i, i] = np.sum(W[i, :])\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n invSqrtD = np.linalg.inv(np.sqrt(D))\n L = D - W\n op = np.matmul(np.matmul(invSqrtD, L), invSqrtD)\n _, _, eigenVecs = cv.eigen(op)\n secMinEigenVec = eigenVecs[eigenVecs.shape[1] - 2, :]\n C1 = 0\n C2 = 0\n for i in range(secMinEigenVec.shape[0]):\n if secMinEigenVec[i] < 0:\n C1 += D[i, i]\n else:\n C2 += D[i, i]\n print('Eigen Vec: ' + str(np.round(secMinEigenVec, 3)))\n minNormCut = (1 / C1 + 1 / C2) * 2.4\n print('Min Norm Cut = ' + str(minNormCut))\n print('=================================================')\n\n\n<mask token>\n",
"step-4": "import numpy as np\nimport cv2 as cv\nimport random\nimport time\nrandom.seed(0)\n\n\ndef displayImage(winName, img):\n \"\"\" Helper function to display image\n arguments:\n winName -- Name of display window\n img -- Source Image\n \"\"\"\n cv.imshow(winName, img)\n cv.waitKey(0)\n\n\ndef task_1_a():\n print('Task 1 (a) ...')\n img = cv.imread('../images/shapes.png')\n gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(gray_image, 50, 150)\n detected_lines = cv.HoughLines(edges, 1, np.pi / 180, 10)\n for rho, theta in detected_lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 1)\n displayImage('1_a Hough transform - detected lines ', img)\n\n\ndef myHoughLines(img_edges, d_resolution, theta_step_sz, threshold):\n \"\"\"\n Your implementation of HoughLines\n :param img_edges: single-channel binary source image (e.g: edges)\n :param d_resolution: the resolution for the distance parameter\n :param theta_step_sz: the resolution for the angle parameter\n :param threshold: minimum number of votes to consider a detection\n :return: list of detected lines as (d, theta) pairs and the accumulator\n \"\"\"\n accumulator = np.zeros((int(180 / theta_step_sz), int(np.linalg.norm(\n img_edges.shape) / d_resolution)))\n detected_lines = []\n rho = int(np.linalg.norm(img_edges.shape) / d_resolution)\n theta = int(180 / theta_step_sz)\n theta_array = np.deg2rad(np.arange(-90, 90, theta_step_sz))\n width, height = img_edges.shape\n img_edges_copy = img_edges.copy()\n detected_lines = []\n for x in range(width):\n for y in range(height):\n if img_edges_copy[x, y]:\n for index_theta in range(len(theta_array)):\n rho_value = x * np.cos(theta_array[index_theta]\n ) + y * np.sin(theta_array[index_theta])\n index_rho = int(rho_value + rho / 2)\n if index_rho >= rho:\n continue\n accumulator[index_theta, index_rho] += 1\n if accumulator[index_theta, index_rho] >= threshold:\n detected_lines.append((theta_array[index_theta],\n rho_value))\n return detected_lines, accumulator\n\n\ndef task_1_b():\n print('Task 1 (b) ...')\n img = cv.imread('../images/shapes.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150)\n detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)\n cv.imshow('1_b Accumulator myHoughLines', accumulator)\n for theta, rho in detected_lines:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n displayImage('1_b Hough transform - own implementation', img)\n\n\ndef task_2():\n print('Task 2 ...')\n img = cv.imread('../images/line.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150, apertureSize=3)\n theta_res = 1\n d_res = 1\n _, accumulator = myHoughLines(edges, d_res, theta_res, 50)\n displayImage('task_2_ accumulator - mean shift', accumulator)\n\n\ndef myKmeans(data, k, useDist=False):\n \"\"\"\n :return: centers and list of indices that store the cluster index for each data point\n \"\"\"\n centers = np.zeros((k, 1), dtype=int)\n index = np.zeros(data.shape[0], dtype=int)\n clusters = [[] for i in range(k)]\n threshold = 0\n if data.shape[1] > 1:\n threshold = 20\n print('Threshold value = ' + str(threshold))\n 
print('-------------------------------------------------')\n for idx in range(centers.shape[0]):\n randIdx = random.choice(range(data.shape[0]))\n centers[idx] = randIdx\n convergence = False\n iterationNo = 0\n start_time = time.time()\n while not convergence:\n euclDist = 0\n centerVals = data[centers]\n for idx in range(data.shape[0]):\n if useDist:\n euclDist = (centers - idx) / data.shape[0]\n cost = np.square(data[idx] - centerVals) + np.square(euclDist)\n index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])\n clusters[index[idx]].append(idx)\n convCounter = 0\n for idx in range(centers.shape[0]):\n if len(clusters[idx]) > 0:\n if data.shape[1] == 1:\n meanVal = np.mean(data[clusters[idx]])\n elif data.shape[1] == 3:\n meanVal = np.mean(data[clusters[idx]], axis=0)\n diff = (np.abs(centerVals[idx] - meanVal) * 255).astype('uint8'\n )\n if np.sum(diff) > threshold:\n indices = np.unique(np.where((data * 255).astype(\n 'uint8') == (meanVal * 255).astype('uint8'))[0])\n if indices.size > 0:\n centers[idx] = np.random.choice(indices)\n else:\n centers[idx] = np.random.choice(clusters[idx])\n else:\n convCounter += 1\n else:\n convCounter += 1\n if convCounter == k:\n convergence = True\n iterationNo += 1\n print('iterationNo = ', iterationNo)\n print('-------------------------------------------------')\n end_time = time.time()\n print('Data Clustered for K = ' + str(k) + ' in ' + str(round(end_time -\n start_time, 5)) + ' seconds')\n print('-------------------------------------------------')\n return index, centers\n\n\ndef task_3_a():\n print('Task 3 (a) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.\n reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_3_b():\n print('Task 3 (b) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n imgFloat = img.copy().astype('float64')\n imgFloat /= 255\n cv.imshow('Color Image', imgFloat)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n imgVec = np.reshape(imgFloat.copy(), (-1, 3))\n index, centers = myKmeans(imgVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n imgVec[indices] = imgVec[centers[kVal]]\n cv.imshow('Segmented Color Image for k = ' + str(k), imgVec.reshape\n (imgFloat.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_3_c():\n print('Task 3 (c) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n 
print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k, useDist=True)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' +\n str(k), grayVec.reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_4_a():\n print('Task 4 (a) ...')\n print('-------------------------------------------------')\n D = np.zeros((8, 8))\n W = np.array(([0, 1, 0.2, 1, 0, 0, 0, 0], [1, 0, 0.1, 0, 1, 0, 0, 0], [\n 0.2, 0.1, 0, 1, 0, 1, 0.3, 0], [1, 0, 1, 0, 0, 1, 0, 0], [0, 1, 0, \n 0, 0, 0, 1, 1], [0, 0, 1, 1, 0, 0, 1, 0], [0, 0, 0.3, 0, 1, 1, 0, 1\n ], [0, 0, 0, 0, 1, 0, 1, 0]))\n for i in range(W.shape[0]):\n D[i, i] = np.sum(W[i, :])\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n invSqrtD = np.linalg.inv(np.sqrt(D))\n L = D - W\n op = np.matmul(np.matmul(invSqrtD, L), invSqrtD)\n _, _, eigenVecs = cv.eigen(op)\n secMinEigenVec = eigenVecs[eigenVecs.shape[1] - 2, :]\n C1 = 0\n C2 = 0\n for i in range(secMinEigenVec.shape[0]):\n if secMinEigenVec[i] < 0:\n C1 += D[i, i]\n else:\n C2 += D[i, i]\n print('Eigen Vec: ' + str(np.round(secMinEigenVec, 3)))\n minNormCut = (1 / C1 + 1 / C2) * 2.4\n print('Min Norm Cut = ' + str(minNormCut))\n print('=================================================')\n\n\ntask_4_a()\n",
"step-5": "import numpy as np\nimport cv2 as cv\nimport random\nimport time\n\nrandom.seed(0)\n\ndef displayImage(winName, img):\n \"\"\" Helper function to display image\n arguments:\n winName -- Name of display window\n img -- Source Image\n \"\"\"\n cv.imshow(winName, img)\n cv.waitKey(0)\n\n##############################################\n# Task 1 ##########################\n##############################################\n\n\ndef task_1_a():\n print(\"Task 1 (a) ...\")\n img = cv.imread('../images/shapes.png')\n gray_image = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\n edges = cv.Canny( gray_image,50,150)\n #cv.imshow('edges', edges)\n detected_lines = cv.HoughLines(edges,1,np.pi/180,10)\n #print (detected_lines)\n for rho,theta in detected_lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 1000*(-b))\n y1 = int(y0 + 1000*(a))\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*(a))\n\n cv.line(img,(x1,y1),(x2,y2),(0,255,0),1)\n displayImage('1_a Hough transform - detected lines ', img)\n \n\n\n\ndef myHoughLines(img_edges, d_resolution, theta_step_sz, threshold):\n \"\"\"\n Your implementation of HoughLines\n :param img_edges: single-channel binary source image (e.g: edges)\n :param d_resolution: the resolution for the distance parameter\n :param theta_step_sz: the resolution for the angle parameter\n :param threshold: minimum number of votes to consider a detection\n :return: list of detected lines as (d, theta) pairs and the accumulator\n \"\"\"\n accumulator = np.zeros((int(180 / theta_step_sz), int(np.linalg.norm(img_edges.shape) / d_resolution)))\n detected_lines = []\n rho = int(np.linalg.norm(img_edges.shape) / d_resolution)\n #print (rho)\n theta = int(180 / theta_step_sz)\n theta_array = np.deg2rad(np.arange(-90, 90, theta_step_sz))\n #print (theta)\n width, height = img_edges.shape\n img_edges_copy = img_edges.copy()\n detected_lines = []\n for x in range(width):\n for y in range(height):\n if img_edges_copy[x,y]:\n for index_theta in range(len(theta_array)):\n #theta_value = theta * index_theta \n rho_value = x*np.cos(theta_array[index_theta]) + y*np.sin(theta_array[index_theta])\n # to avoid negative index\n index_rho = int (rho_value + rho/2) \n # to avoid index overflow\n if (index_rho >= rho) : continue\n #print('rhoindex')\n #print (index_rho)\n accumulator[index_theta, index_rho] += 1\n if accumulator[index_theta, index_rho] >= threshold:\n detected_lines.append((theta_array[index_theta], rho_value))\n \n return detected_lines, accumulator\n\n\ndef task_1_b():\n print(\"Task 1 (b) ...\")\n img = cv.imread('../images/shapes.png')\n img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # convert the image into grayscale\n edges = cv.Canny( img_gray,50,150) # detect the edges\n detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)\n cv.imshow(\"1_b Accumulator myHoughLines\", accumulator)\n #print (len(detected_lines))\n\n for theta,rho in detected_lines:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 1000*(-b))\n y1 = int(y0 + 1000*(a))\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*(a))\n\n cv.line(img,(x1,y1),(x2,y2),(0,0,255),2)\n displayImage('1_b Hough transform - own implementation', img)\n \n\n\n##############################################\n# Task 2 ##########################\n##############################################\n\n\ndef task_2():\n print(\"Task 2 ...\")\n img = cv.imread('../images/line.png')\n img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # convert the image into grayscale\n edges = 
cv.Canny( img_gray,50,150,apertureSize = 3) # detect the edges\n theta_res = 1 # set the resolution of theta\n d_res = 1 # set the distance resolution\n _, accumulator = myHoughLines(edges, d_res, theta_res, 50)\n displayImage(\"task_2_ accumulator - mean shift\", accumulator)\n #mean_shift(accumulator)\n\n\n##############################################\n# Task 3 ##########################\n##############################################\n\ndef myKmeans(data, k, useDist = False):\n \"\"\"\n :return: centers and list of indices that store the cluster index for each data point\n \"\"\"\n centers = np.zeros((k, 1), dtype = int)\n index = np.zeros(data.shape[0], dtype=int)\n clusters = [[] for i in range(k)]\n\n threshold = 0\n if data.shape[1] > 1:\n threshold = 20\n\n print('Threshold value = ' + str(threshold))\n print('-------------------------------------------------')\n\n # initialize centers using some random points from data\n # ....\n\n # Randomly initialize centers with pixel difference of greater than 0\n\n for idx in range(centers.shape[0]):\n randIdx = random.choice(range(data.shape[0]))\n centers[idx] = randIdx\n\n # Randomly initialize centers of different pixl values. Still buggy\n # start_time = time.time()\n # indices = np.arange(0,data.shape[0]).tolist()\n # for idx in range(centers.shape[0]):\n # if len(indices) > 0:\n # randIdx = random.choice(indices)\n # delIndices = np.unique(np.where((data*255).astype('uint8') == (data[randIdx]*255).astype('uint8'))).tolist()\n # if len(delIndices) > 0:\n # for i in range(len(delIndices)):\n # try:\n # indices.remove(delIndices[i])\n # except ValueError:\n # print('Value not found')\n # # print('Indices removed')\n # else:\n # randIdx = random.choice(range(data.shape[0]))\n # centers[idx] = randIdx \n # end_time = time.time()\n # print('Center no' + str(idx+1) + ' added in ' + str(round(end_time - start_time,5)) + ' seconds')\n\n # To debug uncomment the following lines\n # Sometimes the pixel values of two cluster centroids are too close\n # Therefore, one of the clusters might end up not having any points at all\n # print('Initial centers:\\n' + str(centers))\n # print('-------------------------------------------------')\n # centerVals = data[centers]\n # print('Pixel Values of initial centers:\\n' + str(centerVals))\n # print('-------------------------------------------------')\n\n convergence = False\n iterationNo = 0\n start_time = time.time()\n while not convergence:\n # assign each point to the cluster of closest center\n # ...\n euclDist = 0\n centerVals = data[centers]\n for idx in range(data.shape[0]):\n if useDist: \n # Since data is a vector, distance is only the difference\n # Normalize the distance to keep it between 0 and 1\n euclDist = (centers - idx) / data.shape[0]\n cost = np.square(data[idx] - centerVals) + np.square(euclDist)\n index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])\n clusters[index[idx]].append(idx)\n \n # update clusters' centers and check for convergence\n # ...\n convCounter = 0\n for idx in range(centers.shape[0]):\n if (len(clusters[idx]) > 0):\n if data.shape[1] == 1:\n meanVal = np.mean(data[clusters[idx]])\n elif data.shape[1] == 3:\n meanVal = np.mean(data[clusters[idx]], axis = 0)\n diff = (np.abs(centerVals[idx] - meanVal)*255).astype('uint8')\n if (np.sum(diff) > threshold):\n # indices = np.unique(np.where((data*255).astype('uint8') == (meanVal*255).astype('uint8'))[0])\n indices = np.unique(np.where((data*255).astype('uint8') == (meanVal*255).astype('uint8'))[0])\n if 
indices.size > 0:\n centers[idx] = np.random.choice(indices)\n else:\n # if no pixel with the mean value is found, choose another pixel in the cluster\n # and continue\n centers[idx] = np.random.choice(clusters[idx])\n else:\n convCounter += 1\n else:\n convCounter += 1\n\n if convCounter == k:\n convergence = True\n \n iterationNo += 1\n print('iterationNo = ', iterationNo)\n \n print('-------------------------------------------------')\n end_time = time.time()\n print('Data Clustered for K = ' + str(k) + ' in ' + str(round(end_time - start_time, 5)) + ' seconds')\n print('-------------------------------------------------')\n\n return index, centers\n\n\ndef task_3_a():\n print(\"Task 3 (a) ...\")\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n '''\n ...\n your code ...\n ...\n '''\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n \n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n\n grayVec = np.reshape(grayImg.copy(), (-1,1))\n\n index, centers = myKmeans(grayVec, k)\n\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n\n cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.reshape(grayImg.shape))\n\n cv.waitKey(0)\n print('=================================================')\n\ndef task_3_b():\n print(\"Task 3 (b) ...\")\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n '''\n ...\n your code ...\n ...\n '''\n imgFloat = img.copy().astype('float64')\n imgFloat /= 255\n\n cv.imshow('Color Image', imgFloat)\n\n K = [2, 4, 6]\n\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n\n imgVec = np.reshape(imgFloat.copy(), (-1,3))\n\n index, centers = myKmeans(imgVec, k)\n\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n imgVec[indices] = imgVec[centers[kVal]]\n\n cv.imshow('Segmented Color Image for k = ' + str(k), imgVec.reshape(imgFloat.shape))\n \n cv.waitKey(0)\n print('=================================================')\n\ndef task_3_c():\n print(\"Task 3 (c) ...\")\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n '''\n ...\n your code ...\n ...\n '''\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n \n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1,1))\n\n index, centers = myKmeans(grayVec, k, useDist = True)\n\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n\n cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' + str(k), grayVec.reshape(grayImg.shape))\n \n cv.waitKey(0)\n\n print('=================================================')\n\n\n##############################################\n# Task 4 ##########################\n##############################################\n\n\ndef task_4_a():\n print(\"Task 4 (a) ...\")\n print('-------------------------------------------------')\n D = np.zeros((8,8)) \n W = np.array((\n [0, 1, 0.2, 1, 0, 0, 0, 0], # A\n [1, 0, 0.1, 0, 1, 0, 0, 0], # B\n [0.2, 0.1, 0, 1, 0, 1, 0.3, 0], # C\n [1, 0, 1, 0, 0, 1, 0, 0], # D\n [0, 1, 0, 0, 0, 0, 1, 1], # E\n [0, 0, 1, 1, 0, 
0, 1, 0], # F\n [0, 0, 0.3, 0, 1, 1, 0, 1], # G\n [0, 0, 0, 0, 1, 0, 1, 0] # H\n )) # construct the W matrix\n\n for i in range(W.shape[0]):\n D[i,i] = np.sum(W[i,:]) # construct the D matrix\n\n '''\n ...\n your code ...\n ...\n '''\n invSqrtD = np.linalg.inv(np.sqrt(D))\n L = D - W\n\n op = np.matmul(np.matmul(invSqrtD,L),invSqrtD)\n _, _, eigenVecs = cv.eigen(op)\n secMinEigenVec = eigenVecs[eigenVecs.shape[1]-2, :]\n\n C1 = 0\n C2 = 0\n for i in range(secMinEigenVec.shape[0]):\n if secMinEigenVec[i] < 0:\n C1 += D[i,i]\n else:\n C2 += D[i,i]\n\n print('Eigen Vec: ' + str(np.round(secMinEigenVec, 3)))\n\n # Figure in pdf\n minNormCut = (1/C1 + 1/C2) * 2.4\n print('Min Norm Cut = ' + str(minNormCut))\n print('=================================================')\n\n##############################################\n##############################################\n##############################################\n\n\n# task_1_a()\n# task_1_b()\n# task_2()\n# task_3_a()\n# cv.destroyAllWindows()\n# task_3_b()\n# cv.destroyAllWindows()\n# task_3_c()\n# cv.destroyAllWindows()\ntask_4_a()",
"step-ids": [
7,
8,
10,
12,
13
]
}
|
[
7,
8,
10,
12,
13
] |
# 213. House Robber II
# You are a professional robber planning to rob houses along a street; each house has a certain amount of cash hidden inside. All houses here are arranged in a circle, which means the first house and the last house are adjacent.
# Adjacent houses share a connected security system: the alarm is triggered automatically if two adjacent houses are broken into on the same night.
# Given a non-negative integer array representing the amount of money stored in each house, compute the highest amount you can rob without setting off the alarm.
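# Example: nums = [2, 3, 2] -> 3 (houses 0 and 2 are adjacent, so rob house 1 alone)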

class Solution:
    # runtime beats 86.24% of submissions, memory beats 15.46%
def rob(self, nums) -> int:
n = len(nums)
if n == 0:
return 0
if n == 1:
return nums[0]
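        # circular street: the first and last house are adjacent, so solve the
        # linear problem twice (without the first house, then without the last)
        # and take the better result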
        return max(self.helper(nums[1:], n - 1), self.helper(nums[:-1], n - 1))

    def helper(self, nums, n):
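        # standard linear house robber: dp[i] = best loot from houses 0..i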
if n == 1:
return nums[0]
dp = [0] * n
dp[0] = nums[0]
dp[1] = max(nums[0], nums[1])
for i in range(2, n):
dp[i] = max(dp[i - 1], dp[i - 2] + nums[i])
        return dp[n - 1]

    # excellent solution
def rob2(self, nums) -> int:
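        # dp[i][1] = best loot if house i is robbed, dp[i][0] = best if skipped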
n = len(nums)
        if n == 0:
            return 0
        if n == 1:
            return nums[0]
        # case 1: the first house is robbed (so the last must be skipped)
dp = [[0, 0] for _ in range(n)]
dp[0][1] = nums[0]
dp[0][0] = float('-inf')
for i in range(1, n):
dp[i][1] = dp[i - 1][0] + nums[i]
dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])
tmp_max = dp[n - 1][0]
        # case 2: the first house is not robbed (the last house is free)
dp = [[0, 0] for _ in range(n)]
dp[0][1] = float('-inf')
dp[0][0] = 0
for i in range(1, n):
dp[i][1] = dp[i - 1][0] + nums[i]
dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])
return max(dp[n - 1][0], dp[n - 1][1], tmp_max)
|
normal
|
{
"blob_id": "59b2c9d279168a806e59fb7529ab12d7b86107bc",
"index": 5340,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n\n def helper(self, nums, n):\n if n == 1:\n return nums[0]\n dp = [0] * n\n dp[0] = nums[0]\n dp[1] = max(nums[0], nums[1])\n for i in range(2, n):\n dp[i] = max(dp[i - 1], dp[i - 2] + nums[i])\n return dp[n - 1]\n <mask token>\n",
"step-3": "class Solution:\n <mask token>\n\n def helper(self, nums, n):\n if n == 1:\n return nums[0]\n dp = [0] * n\n dp[0] = nums[0]\n dp[1] = max(nums[0], nums[1])\n for i in range(2, n):\n dp[i] = max(dp[i - 1], dp[i - 2] + nums[i])\n return dp[n - 1]\n\n def rob2(self, nums) ->int:\n n = len(nums)\n if nums == []:\n return 0\n if len(nums) == 1:\n return nums[0]\n dp = [[0, 0] for _ in range(n)]\n dp[0][1] = nums[0]\n dp[0][0] = float('-inf')\n for i in range(1, n):\n dp[i][1] = dp[i - 1][0] + nums[i]\n dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\n tmp_max = dp[n - 1][0]\n dp = [[0, 0] for _ in range(n)]\n dp[0][1] = float('-inf')\n dp[0][0] = 0\n for i in range(1, n):\n dp[i][1] = dp[i - 1][0] + nums[i]\n dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\n return max(dp[n - 1][0], dp[n - 1][1], tmp_max)\n",
"step-4": "class Solution:\n\n def rob(self, nums) ->int:\n n = len(nums)\n if n == 0:\n return 0\n if n == 1:\n return nums[0]\n return max(self.helper(nums[1:], n - 1), self.helper(nums[:-1], n - 1))\n\n def helper(self, nums, n):\n if n == 1:\n return nums[0]\n dp = [0] * n\n dp[0] = nums[0]\n dp[1] = max(nums[0], nums[1])\n for i in range(2, n):\n dp[i] = max(dp[i - 1], dp[i - 2] + nums[i])\n return dp[n - 1]\n\n def rob2(self, nums) ->int:\n n = len(nums)\n if nums == []:\n return 0\n if len(nums) == 1:\n return nums[0]\n dp = [[0, 0] for _ in range(n)]\n dp[0][1] = nums[0]\n dp[0][0] = float('-inf')\n for i in range(1, n):\n dp[i][1] = dp[i - 1][0] + nums[i]\n dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\n tmp_max = dp[n - 1][0]\n dp = [[0, 0] for _ in range(n)]\n dp[0][1] = float('-inf')\n dp[0][0] = 0\n for i in range(1, n):\n dp[i][1] = dp[i - 1][0] + nums[i]\n dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\n return max(dp[n - 1][0], dp[n - 1][1], tmp_max)\n",
"step-5": "# 213. 打家劫舍 II\r\n# 你是一个专业的小偷,计划偷窃沿街的房屋,每间房内都藏有一定的现金。这个地方所有的房屋都 围成一圈 ,这意味着第一个房屋和最后一个房屋是紧挨着的。\r\n# 同时,相邻的房屋装有相互连通的防盗系统,如果两间相邻的房屋在同一晚上被小偷闯入,系统会自动报警 。\r\n# 给定一个代表每个房屋存放金额的非负整数数组,计算你 在不触动警报装置的情况下 ,能够偷窃到的最高金额。\r\n\r\nclass Solution:\r\n # 86.24%, 15.46%\r\n def rob(self, nums) -> int:\r\n n = len(nums)\r\n if n == 0:\r\n return 0\r\n if n == 1:\r\n return nums[0]\r\n return max(self.helper(nums[1:],n-1),self.helper(nums[:-1],n-1))\r\n\r\n\r\n def helper(self,nums,n):\r\n if n == 1:\r\n return nums[0]\r\n dp = [0] * n\r\n dp[0] = nums[0]\r\n dp[1] = max(nums[0], nums[1])\r\n for i in range(2, n):\r\n dp[i] = max(dp[i - 1], dp[i - 2] + nums[i])\r\n return dp[n-1]\r\n\r\n # 优秀解答\r\n def rob2(self, nums) -> int:\r\n n = len(nums)\r\n if nums == []:\r\n return 0\r\n if len(nums) == 1:\r\n return nums[0]\r\n # 抢了\r\n dp = [[0, 0] for _ in range(n)]\r\n dp[0][1] = nums[0]\r\n dp[0][0] = float('-inf')\r\n for i in range(1, n):\r\n dp[i][1] = dp[i - 1][0] + nums[i]\r\n dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\r\n tmp_max = dp[n - 1][0]\r\n\r\n # 没抢\r\n dp = [[0, 0] for _ in range(n)]\r\n dp[0][1] = float('-inf')\r\n dp[0][0] = 0\r\n for i in range(1, n):\r\n dp[i][1] = dp[i - 1][0] + nums[i]\r\n dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\r\n return max(dp[n - 1][0], dp[n - 1][1], tmp_max)\r\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def load_yaml_config(config_path: str) ->Dict:
with open(config_path, 'r') as stream:
return yaml.load(stream)
def get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:
return optim.Adam(model.parameters(), **optim_config)
def save_checkpoint(model: nn.Module, path: str):
torch.save(model.state_dict(), path)
def load_state_dict(path: str) ->OrderedDict:
return torch.load(path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_device() ->torch.device:
if torch.cuda.is_available():
return torch.device('cuda')
return torch.device('cpu')
def load_yaml_config(config_path: str) ->Dict:
with open(config_path, 'r') as stream:
return yaml.load(stream)
def get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:
return optim.Adam(model.parameters(), **optim_config)
def save_checkpoint(model: nn.Module, path: str):
torch.save(model.state_dict(), path)
def load_state_dict(path: str) ->OrderedDict:
return torch.load(path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_device() ->torch.device:
if torch.cuda.is_available():
return torch.device('cuda')
return torch.device('cpu')
def load_yaml_config(config_path: str) ->Dict:
with open(config_path, 'r') as stream:
return yaml.load(stream)
def get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:
return optim.Adam(model.parameters(), **optim_config)
def save_checkpoint(model: nn.Module, path: str):
torch.save(model.state_dict(), path)
def load_state_dict(path: str) ->OrderedDict:
return torch.load(path)
def load_checkpoint(model: nn.Module, checkpoint_path: Optional[str]):
if checkpoint_path:
model.load_state_dict(load_state_dict(checkpoint_path))
<|reserved_special_token_1|>
from typing import Dict, Optional
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.optim as optim
import yaml
def get_device() ->torch.device:
if torch.cuda.is_available():
return torch.device('cuda')
return torch.device('cpu')
def load_yaml_config(config_path: str) ->Dict:
with open(config_path, 'r') as stream:
return yaml.load(stream)
def get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:
return optim.Adam(model.parameters(), **optim_config)
def save_checkpoint(model: nn.Module, path: str):
torch.save(model.state_dict(), path)
def load_state_dict(path: str) ->OrderedDict:
return torch.load(path)
def load_checkpoint(model: nn.Module, checkpoint_path: Optional[str]):
if checkpoint_path:
model.load_state_dict(load_state_dict(checkpoint_path))
<|reserved_special_token_1|>
from typing import Dict, Optional
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.optim as optim
import yaml
def get_device() -> torch.device:
if torch.cuda.is_available():
return torch.device("cuda")
return torch.device("cpu")
def load_yaml_config(config_path: str) -> Dict:
with open(config_path, "r") as stream:
return yaml.load(stream)
def get_optimizer(model: nn.Module, optim_config: Dict) -> optim.Optimizer:
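    # optim_config is unpacked into Adam keyword arguments, e.g. {"lr": 1e-3}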
return optim.Adam(model.parameters(), **optim_config)
def save_checkpoint(model: nn.Module, path: str):
torch.save(model.state_dict(), path)
def load_state_dict(path: str) -> OrderedDict:
return torch.load(path)
def load_checkpoint(model: nn.Module, checkpoint_path: Optional[str]):
if checkpoint_path:
model.load_state_dict(load_state_dict(checkpoint_path))
|
flexible
|
{
"blob_id": "e8a36bd7826c5d71cf8012ea82df6c127dd858fc",
"index": 549,
"step-1": "<mask token>\n\n\ndef load_yaml_config(config_path: str) ->Dict:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n\n\ndef get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:\n return optim.Adam(model.parameters(), **optim_config)\n\n\ndef save_checkpoint(model: nn.Module, path: str):\n torch.save(model.state_dict(), path)\n\n\ndef load_state_dict(path: str) ->OrderedDict:\n return torch.load(path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_device() ->torch.device:\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')\n\n\ndef load_yaml_config(config_path: str) ->Dict:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n\n\ndef get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:\n return optim.Adam(model.parameters(), **optim_config)\n\n\ndef save_checkpoint(model: nn.Module, path: str):\n torch.save(model.state_dict(), path)\n\n\ndef load_state_dict(path: str) ->OrderedDict:\n return torch.load(path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_device() ->torch.device:\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')\n\n\ndef load_yaml_config(config_path: str) ->Dict:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n\n\ndef get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:\n return optim.Adam(model.parameters(), **optim_config)\n\n\ndef save_checkpoint(model: nn.Module, path: str):\n torch.save(model.state_dict(), path)\n\n\ndef load_state_dict(path: str) ->OrderedDict:\n return torch.load(path)\n\n\ndef load_checkpoint(model: nn.Module, checkpoint_path: Optional[str]):\n if checkpoint_path:\n model.load_state_dict(load_state_dict(checkpoint_path))\n",
"step-4": "from typing import Dict, Optional\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport yaml\n\n\ndef get_device() ->torch.device:\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')\n\n\ndef load_yaml_config(config_path: str) ->Dict:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n\n\ndef get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:\n return optim.Adam(model.parameters(), **optim_config)\n\n\ndef save_checkpoint(model: nn.Module, path: str):\n torch.save(model.state_dict(), path)\n\n\ndef load_state_dict(path: str) ->OrderedDict:\n return torch.load(path)\n\n\ndef load_checkpoint(model: nn.Module, checkpoint_path: Optional[str]):\n if checkpoint_path:\n model.load_state_dict(load_state_dict(checkpoint_path))\n",
"step-5": "from typing import Dict, Optional\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport yaml\n\n\ndef get_device() -> torch.device:\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n return torch.device(\"cpu\")\n\n\ndef load_yaml_config(config_path: str) -> Dict:\n with open(config_path, \"r\") as stream:\n return yaml.load(stream)\n\n\ndef get_optimizer(model: nn.Module, optim_config: Dict) -> optim.Optimizer:\n return optim.Adam(model.parameters(), **optim_config)\n\n\ndef save_checkpoint(model: nn.Module, path: str):\n torch.save(model.state_dict(), path)\n\n\ndef load_state_dict(path: str) -> OrderedDict:\n return torch.load(path)\n\n\ndef load_checkpoint(model: nn.Module, checkpoint_path: Optional[str]):\n if checkpoint_path:\n model.load_state_dict(load_state_dict(checkpoint_path))\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
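A minimal usage sketch for the checkpoint helpers above; the toy model, learning rate, and file name are illustrative assumptions, not part of the record:

import torch.nn as nn

model = nn.Linear(4, 2).to(get_device())
optimizer = get_optimizer(model, {"lr": 1e-3})  # optim_config kwargs go straight to optim.Adam
save_checkpoint(model, "toy_model.pt")          # persists only the state_dict
load_checkpoint(model, "toy_model.pt")          # silently skipped when the path is falsy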
#!/usr/bin/env python

# KMeans
# Reference: https://qiita.com/g-k/items/0d5d22a12a4507ecbf11
#
# The algorithm first assigns the data to provisional clusters, then keeps
# using each cluster's mean to readjust the assignments until the data are
# split cleanly. Because it builds exactly k clusters, it is called the
# k-means method.
#
# k-means++ fixes the weakness of k-means' initial-center selection: it
# skews the sampling probabilities so that the initial centers tend to be
# placed far apart from one another.
#
# An unsupervised learning algorithm, used mainly to cluster unlabeled
# vector-form data.
# 1. Pick the first center uniformly at random from the data points.
# 2. For all remaining points, compute the squared distance to the existing
#    centers and sum them up.
# 3. Divide each squared distance by the total from step 2.
# 4. Treat the result of step 3 as a new probability distribution and pick
#    the next center from it.
# 5. Repeat steps 2-4 until there are as many centers as clusters.

import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import train_test_split


# Input:  data, number of clusters, number of iterations
# Output: the cluster number of each data point, and each cluster's center
def kmeansplus(X, K, n_iter):
    n = X.shape[0]
    idx = np.zeros(X.shape[0])
    distance = np.zeros(n * K).reshape(n, K)
    centers = np.zeros(X.shape[1] * K).reshape(K, -1)

    # the first draw uses a uniform probability
    pr = np.repeat(1 / n, n)
    # the first center is picked at random
    centers[0, :] = X[np.random.choice(np.arange(n), 1, p=pr), ]
    distance[:, 0] = np.sum((X - centers[0, :]) ** 2, axis=1)

    for k in np.arange(1, K):
        pr = np.sum(distance, axis=1) / np.sum(distance)
        centers[k, :] = X[np.random.choice(np.arange(n), 1, p=pr), ]
        distance[:, k] = np.sum((X - centers[k, :]) ** 2, axis=1)

    for _ in range(n_iter):
        # compute each point's distance to the centers and keep the index
        # (cluster number) of the nearest center
        for i in range(X.shape[0]):
            idx[i] = np.argmin(np.sum((X[i, :] - centers) ** 2, axis=1))
        # recompute the centroids and move the centers there
        for k in range(K):
            centers[k, :] = X[idx == k, :].mean(axis=0)

    return idx, centers


def main():
    # As a sample, generate 20 points from each of four 2-D normal
    # distributions, 80 points in total; the data scatter as plotted below.

    # generate the data
    np.random.seed(123)
    x1 = np.r_[np.random.normal(size=20, loc=1, scale=2), np.random.normal(size=20, loc=8, scale=2)
               , np.random.normal(size=20, loc=15, scale=2), np.random.normal(size=20, loc=25, scale=2)]
    x2 = np.r_[np.random.normal(size=20, loc=15, scale=2), np.random.normal(size=20, loc=1, scale=2)
               , np.random.normal(size=20, loc=20, scale=2), np.random.normal(size=20, loc=0, scale=2)]
    X = np.c_[x1, x2]

    # visualize
    plt.figure(figsize=(6, 6))
    plt.scatter(X[:, 0], X[:, 1], c="black", s=10, alpha=0.5)
    plt.show()

    # Cluster the data into 4 groups with k-means++; for simplicity the
    # number of iterations is fixed at 9.
    K = 4
    centers = np.array([[0, 5], [5, 0], [10, 15], [20, 10]])  # unused: kmeansplus picks its own initial centers
    inter = 9

    idx, centers = kmeansplus(X, K, inter)

    data = pd.DataFrame(X, columns=["X", "Y"])
    data["idx"] = idx

    data0 = data[data.idx == 0]
    data1 = data[data.idx == 1]
    data2 = data[data.idx == 2]
    data3 = data[data.idx == 3]

    plt.figure(figsize=(6, 6))
    plt.scatter(data0.X, data0.Y, color="r", s=10, alpha=0.5)
    plt.scatter(data1.X, data1.Y, color="b", s=10, alpha=0.5)
    plt.scatter(data2.X, data2.Y, color="g", s=10, alpha=0.5)
    plt.scatter(data3.X, data3.Y, color="orange", s=10, alpha=0.5)
    plt.scatter(centers[:, 0], centers[:, 1], color=["r", "b", "g", "orange"])
    plt.show()


if __name__ == "__main__":
    main()
|
flexible
|
{
"blob_id": "10bf7959f178d3b5c0ce6e97253e665d32363af7",
"index": 6015,
"step-1": "<mask token>\n\n\ndef kmeansplus(X, K, n_iter):\n n = X.shape[0]\n idx = np.zeros(X.shape[0])\n distance = np.zeros(n * K).reshape(n, K)\n centers = np.zeros(X.shape[1] * K).reshape(K, -1)\n pr = np.repeat(1 / n, n)\n centers[0, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, 0] = np.sum((X - centers[0, :]) ** 2, axis=1)\n for k in np.arange(1, K):\n pr = np.sum(distance, axis=1) / np.sum(distance)\n centers[k, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, k] = np.sum((X - centers[k, :]) ** 2, axis=1)\n for _ in range(n_iter):\n for i in range(X.shape[0]):\n idx[i] = np.argmin(np.sum((X[i, :] - centers) ** 2, axis=1))\n for k in range(K):\n centers[k, :] = X[idx == k, :].mean(axis=0)\n return idx, centers\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef kmeansplus(X, K, n_iter):\n n = X.shape[0]\n idx = np.zeros(X.shape[0])\n distance = np.zeros(n * K).reshape(n, K)\n centers = np.zeros(X.shape[1] * K).reshape(K, -1)\n pr = np.repeat(1 / n, n)\n centers[0, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, 0] = np.sum((X - centers[0, :]) ** 2, axis=1)\n for k in np.arange(1, K):\n pr = np.sum(distance, axis=1) / np.sum(distance)\n centers[k, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, k] = np.sum((X - centers[k, :]) ** 2, axis=1)\n for _ in range(n_iter):\n for i in range(X.shape[0]):\n idx[i] = np.argmin(np.sum((X[i, :] - centers) ** 2, axis=1))\n for k in range(K):\n centers[k, :] = X[idx == k, :].mean(axis=0)\n return idx, centers\n\n\ndef main():\n np.random.seed(123)\n x1 = np.r_[np.random.normal(size=20, loc=1, scale=2), np.random.normal(\n size=20, loc=8, scale=2), np.random.normal(size=20, loc=15, scale=2\n ), np.random.normal(size=20, loc=25, scale=2)]\n x2 = np.r_[np.random.normal(size=20, loc=15, scale=2), np.random.normal\n (size=20, loc=1, scale=2), np.random.normal(size=20, loc=20, scale=\n 2), np.random.normal(size=20, loc=0, scale=2)]\n X = np.c_[x1, x2]\n plt.figure(figsize=(6, 6))\n plt.scatter(X[:, 0], X[:, 1], c='black', s=10, alpha=0.5)\n plt.show()\n K = 4\n centers = np.array([[0, 5], [5, 0], [10, 15], [20, 10]])\n inter = 9\n idx, centers = kmeansplus(X, K, inter)\n data = pd.DataFrame(X, columns=['X', 'Y'])\n data['idx'] = idx\n data0 = data[data.idx == 0]\n data1 = data[data.idx == 1]\n data2 = data[data.idx == 2]\n data3 = data[data.idx == 3]\n plt.figure(figsize=(6, 6))\n plt.scatter(data0.X, data0.Y, color='r', s=10, alpha=0.5)\n plt.scatter(data1.X, data1.Y, color='b', s=10, alpha=0.5)\n plt.scatter(data2.X, data2.Y, color='g', s=10, alpha=0.5)\n plt.scatter(data3.X, data3.Y, color='orange', s=10, alpha=0.5)\n plt.scatter(centers[:, 0], centers[:, 1], color=['r', 'b', 'g', 'orange'])\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef kmeansplus(X, K, n_iter):\n n = X.shape[0]\n idx = np.zeros(X.shape[0])\n distance = np.zeros(n * K).reshape(n, K)\n centers = np.zeros(X.shape[1] * K).reshape(K, -1)\n pr = np.repeat(1 / n, n)\n centers[0, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, 0] = np.sum((X - centers[0, :]) ** 2, axis=1)\n for k in np.arange(1, K):\n pr = np.sum(distance, axis=1) / np.sum(distance)\n centers[k, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, k] = np.sum((X - centers[k, :]) ** 2, axis=1)\n for _ in range(n_iter):\n for i in range(X.shape[0]):\n idx[i] = np.argmin(np.sum((X[i, :] - centers) ** 2, axis=1))\n for k in range(K):\n centers[k, :] = X[idx == k, :].mean(axis=0)\n return idx, centers\n\n\ndef main():\n np.random.seed(123)\n x1 = np.r_[np.random.normal(size=20, loc=1, scale=2), np.random.normal(\n size=20, loc=8, scale=2), np.random.normal(size=20, loc=15, scale=2\n ), np.random.normal(size=20, loc=25, scale=2)]\n x2 = np.r_[np.random.normal(size=20, loc=15, scale=2), np.random.normal\n (size=20, loc=1, scale=2), np.random.normal(size=20, loc=20, scale=\n 2), np.random.normal(size=20, loc=0, scale=2)]\n X = np.c_[x1, x2]\n plt.figure(figsize=(6, 6))\n plt.scatter(X[:, 0], X[:, 1], c='black', s=10, alpha=0.5)\n plt.show()\n K = 4\n centers = np.array([[0, 5], [5, 0], [10, 15], [20, 10]])\n inter = 9\n idx, centers = kmeansplus(X, K, inter)\n data = pd.DataFrame(X, columns=['X', 'Y'])\n data['idx'] = idx\n data0 = data[data.idx == 0]\n data1 = data[data.idx == 1]\n data2 = data[data.idx == 2]\n data3 = data[data.idx == 3]\n plt.figure(figsize=(6, 6))\n plt.scatter(data0.X, data0.Y, color='r', s=10, alpha=0.5)\n plt.scatter(data1.X, data1.Y, color='b', s=10, alpha=0.5)\n plt.scatter(data2.X, data2.Y, color='g', s=10, alpha=0.5)\n plt.scatter(data3.X, data3.Y, color='orange', s=10, alpha=0.5)\n plt.scatter(centers[:, 0], centers[:, 1], color=['r', 'b', 'g', 'orange'])\n plt.show()\n\n\nplt.show()\nif __name__ == '__main__':\n main()\n",
"step-4": "import matplotlib.font_manager as fm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.model_selection import train_test_split\n\n\ndef kmeansplus(X, K, n_iter):\n n = X.shape[0]\n idx = np.zeros(X.shape[0])\n distance = np.zeros(n * K).reshape(n, K)\n centers = np.zeros(X.shape[1] * K).reshape(K, -1)\n pr = np.repeat(1 / n, n)\n centers[0, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, 0] = np.sum((X - centers[0, :]) ** 2, axis=1)\n for k in np.arange(1, K):\n pr = np.sum(distance, axis=1) / np.sum(distance)\n centers[k, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, k] = np.sum((X - centers[k, :]) ** 2, axis=1)\n for _ in range(n_iter):\n for i in range(X.shape[0]):\n idx[i] = np.argmin(np.sum((X[i, :] - centers) ** 2, axis=1))\n for k in range(K):\n centers[k, :] = X[idx == k, :].mean(axis=0)\n return idx, centers\n\n\ndef main():\n np.random.seed(123)\n x1 = np.r_[np.random.normal(size=20, loc=1, scale=2), np.random.normal(\n size=20, loc=8, scale=2), np.random.normal(size=20, loc=15, scale=2\n ), np.random.normal(size=20, loc=25, scale=2)]\n x2 = np.r_[np.random.normal(size=20, loc=15, scale=2), np.random.normal\n (size=20, loc=1, scale=2), np.random.normal(size=20, loc=20, scale=\n 2), np.random.normal(size=20, loc=0, scale=2)]\n X = np.c_[x1, x2]\n plt.figure(figsize=(6, 6))\n plt.scatter(X[:, 0], X[:, 1], c='black', s=10, alpha=0.5)\n plt.show()\n K = 4\n centers = np.array([[0, 5], [5, 0], [10, 15], [20, 10]])\n inter = 9\n idx, centers = kmeansplus(X, K, inter)\n data = pd.DataFrame(X, columns=['X', 'Y'])\n data['idx'] = idx\n data0 = data[data.idx == 0]\n data1 = data[data.idx == 1]\n data2 = data[data.idx == 2]\n data3 = data[data.idx == 3]\n plt.figure(figsize=(6, 6))\n plt.scatter(data0.X, data0.Y, color='r', s=10, alpha=0.5)\n plt.scatter(data1.X, data1.Y, color='b', s=10, alpha=0.5)\n plt.scatter(data2.X, data2.Y, color='g', s=10, alpha=0.5)\n plt.scatter(data3.X, data3.Y, color='orange', s=10, alpha=0.5)\n plt.scatter(centers[:, 0], centers[:, 1], color=['r', 'b', 'g', 'orange'])\n plt.show()\n\n\nplt.show()\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\r\n\r\n# KMeans\r\n# 参考 https://qiita.com/g-k/items/0d5d22a12a4507ecbf11\r\n# \r\n# データを適当なクラスタに分けた後、クラスタの平均を用いてうまい具合にデータがわかれるように調整させていくアルゴリズム\r\n# 任意の指定のk個のクラスタを作成するアルゴリズムであることから、k-means法(k点平均法と呼ばれている)\r\n\r\n# k-meansの初期値選択の弱点を解消したのが、k-means++\r\n# k-means++では、中心点が互いに遠いところに配置されるような確率が高くなるように操作する。\r\n\r\n# 教師なし学習のアルゴリズム\r\n# 主に正解ラベルの無いベクトル形式のデータをクラスタリングするのに用いられる。\r\n# 1 1つ目の中心点を、データ点の中から均等な確率でランダムに選ぶ。\r\n# 2 残り全てのデータ点について、既存の中心点との距離の2乗を計算して足し合わせる。\r\n# 3 2.の結果を合計した値で、それぞれの距離の2乗を割る。\r\n# 4 3.の結果を新たな確率として、2つ目の中心点を選ぶ。\r\n# 5 2.~4.を、クラスター数と同じ数の中心点が出来るまで繰り返す。\r\n\r\nimport matplotlib.font_manager as fm\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfrom matplotlib import cm\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\n# 入力:データ、クラスター数、中心点の初期値、繰り返し回数\r\n# 出力:各クラスターの中心点、各データ点の属するクラスター番号\r\ndef kmeansplus(X,K,n_iter):\r\n n = X.shape[0]\r\n idx = np.zeros(X.shape[0])\r\n distance = np.zeros(n*K).reshape(n,K)\r\n centers = np.zeros(X.shape[1]*K).reshape(K,-1)\r\n\r\n #最初の確率は均等\r\n pr = np.repeat(1/n,n)\r\n #1つ目の中心点はランダムに選ぶ\r\n centers[0,:] = X[np.random.choice(np.arange(n),1,p=pr),]\r\n distance[:,0] = np.sum((X-centers[0,:])**2,axis=1)\r\n \r\n for k in np.arange(1,K):\r\n pr = np.sum(distance,axis=1)/np.sum(distance)\r\n centers[k,:] = X[np.random.choice(np.arange(n),1,p=pr),]\r\n distance[:,k] = np.sum((X-centers[k,:])**2,axis=1)\r\n \r\n for _ in range(n_iter):\r\n #データ点と中心点の距離を計算し、一番近い中心点のインデックス(クラスター番号)を返す。\r\n for i in range(X.shape[0]):\r\n idx[i] = np.argmin(np.sum((X[i,:] - centers)**2,axis=1))\r\n #重心を計算して中心点を移動させる\r\n for k in range(K):\r\n centers[k,:] = X[idx==k,:].mean(axis=0)\r\n\r\n return idx,centers\r\n\r\ndef main():\r\n # サンプルとして、4種類の2次元正規乱数に従う点を各20個ずつ、計80個生成した。\r\n # データは以下のように散らばっている\r\n\r\n #データの生成\r\n np.random.seed(123)\r\n x1 = np.r_[np.random.normal(size=20,loc=1,scale=2),np.random.normal(size=20,loc=8,scale=2)\r\n ,np.random.normal(size=20,loc=15,scale=2),np.random.normal(size=20,loc=25,scale=2)]\r\n x2 = np.r_[np.random.normal(size=20,loc=15,scale=2),np.random.normal(size=20,loc=1,scale=2)\r\n ,np.random.normal(size=20,loc=20,scale=2),np.random.normal(size=20,loc=0,scale=2)]\r\n X = np.c_[x1,x2]\r\n\r\n #可視化\r\n plt.figure(figsize=(6,6))\r\n plt.scatter(X[:,0],X[:,1],c=\"black\",s=10,alpha=0.5)\r\n plt.show()\r\n\r\n # k-means法で4グループにクラスタリングしてみる。\r\n # 簡単のため、繰り返し回数は4回とする。\r\n K=4\r\n centers = np.array([[0,5],[5,0],[10,15],[20,10]])\r\n inter = 9\r\n\r\n idx, centers = kmeansplus(X,K,inter)\r\n\r\n data = pd.DataFrame(X,columns=[\"X\",\"Y\"])\r\n data[\"idx\"] = idx\r\n\r\n data0 = data[data.idx==0]\r\n data1 = data[data.idx==1]\r\n data2 = data[data.idx==2]\r\n data3 = data[data.idx==3]\r\n\r\n plt.figure(figsize=(6,6))\r\n plt.scatter(data0.X,data0.Y,color=\"r\",s=10,alpha=0.5)\r\n plt.scatter(data1.X,data1.Y,color=\"b\",s=10,alpha=0.5)\r\n plt.scatter(data2.X,data2.Y,color=\"g\",s=10,alpha=0.5)\r\n plt.scatter(data3.X,data3.Y,color=\"orange\",s=10,alpha=0.5)\r\n plt.scatter(centers[:,0],centers[:,1],color=[\"r\",\"b\",\"g\",\"orange\"]) \r\n plt.show()\r\n\r\n\r\nplt.show()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
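A smaller self-contained sanity check for kmeansplus; the blob data and cluster count below are illustrative:

import numpy as np

rng = np.random.default_rng(0)
blobs = np.vstack([rng.normal(0, 0.5, (30, 2)), rng.normal(10, 0.5, (30, 2))])
idx, centers = kmeansplus(blobs, K=2, n_iter=5)
print(centers)                        # expect one center near (0, 0) and one near (10, 10)
print(np.bincount(idx.astype(int)))   # roughly 30 points in each cluster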
import numpy as np
import tensorflow as tf

x_data = np.random.rand(100)
y_data = x_data * 10 + 5

# build the linear model
b = tf.Variable(0.)
k = tf.Variable(0.)
y = k * x_data + b

# quadratic cost function; tf.square takes the element-wise square
loss = tf.reduce_mean(tf.square(y_data - y))

# define a gradient-descent optimizer for training
# note: this uses the TF1.x graph/session API (tf.compat.v1 under TensorFlow 2)
optimizer = tf.train.GradientDescentOptimizer(0.2)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()

with tf.Session() as ss:
    ss.run(init)
    for step in range(201):
        ss.run(train)
        if step % 10 == 0:
            print(step, ss.run([k, b]))
|
normal
|
{
"blob_id": "ba7f66a0f9cf1028add778315033d596e10d6f16",
"index": 3197,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith tf.Session() as ss:\n ss.run(init)\n for step in range(201):\n ss.run(train)\n if step % 10 == 0:\n print(step, ss.run([k, b]))\n",
"step-3": "<mask token>\nx_data = np.random.rand(100)\ny_data = x_data * 10 + 5\nb = tf.Variable(0.0)\nk = tf.Variable(0.0)\ny = k * x_data + b\nloss = tf.reduce_mean(tf.square(y_data - y))\noptimizer = tf.train.GradientDescentOptimizer(0.2)\ntrain = optimizer.minimize(loss)\ninit = tf.global_variables_initializer()\nwith tf.Session() as ss:\n ss.run(init)\n for step in range(201):\n ss.run(train)\n if step % 10 == 0:\n print(step, ss.run([k, b]))\n",
"step-4": "import numpy as np\nimport tensorflow as tf\nx_data = np.random.rand(100)\ny_data = x_data * 10 + 5\nb = tf.Variable(0.0)\nk = tf.Variable(0.0)\ny = k * x_data + b\nloss = tf.reduce_mean(tf.square(y_data - y))\noptimizer = tf.train.GradientDescentOptimizer(0.2)\ntrain = optimizer.minimize(loss)\ninit = tf.global_variables_initializer()\nwith tf.Session() as ss:\n ss.run(init)\n for step in range(201):\n ss.run(train)\n if step % 10 == 0:\n print(step, ss.run([k, b]))\n",
"step-5": "import numpy as np\nimport tensorflow as tf\n\nx_data = np.random.rand(100)\ny_data = x_data * 10 + 5\n\n#构造线性模型\nb = tf.Variable(0.)\nk = tf.Variable(0.)\ny=k*x_data+b\n\n\n#二次代价函数 square求平方\nloss= tf.reduce_mean(tf.square(y_data-y))\n\n#定义一个梯度下降法来进行训练的优化器\n\noptimizer=tf.train.GradientDescentOptimizer(.2)\n\ntrain=optimizer.minimize(loss)\n\ninit=tf.global_variables_initializer()\n\nwith tf.Session() as ss:\n ss.run(init)\n for step in range(201):\n ss.run(train)\n if step %10==0:\n print(step,ss.run([k,b]))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
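For comparison, a sketch of the same fit in TensorFlow 2 eager style; this assumes tensorflow>=2 and is not part of the record:

import numpy as np
import tensorflow as tf

x = np.random.rand(100).astype(np.float32)
y = x * 10 + 5
k = tf.Variable(0.0)
b = tf.Variable(0.0)
opt = tf.keras.optimizers.SGD(learning_rate=0.2)
for step in range(201):
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(y - (k * x + b)))
    opt.apply_gradients(zip(tape.gradient(loss, [k, b]), [k, b]))
    if step % 50 == 0:
        print(step, k.numpy(), b.numpy())  # k converges toward 10, b toward 5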
class HashTableEntry:
"""
Hash Table entry, as a linked list node.
"""
def __init__(self, key, value):
self.key = key
self.value = value
self.next = None
class HashTable:
"""
A hash table that with `capacity` buckets
that accepts string keys
Implement this.
"""
def __init__(self, capacity):
self.capacity = capacity
self.storage = [None] * capacity
self.numberOfItems = 0
def fnv1(self, key):
"""
FNV-1 64-bit hash function
Implement this, and/or DJB2.
"""
        hash = 0xcbf29ce484222325  # FNV 64-bit offset basis
        for n in key.encode():
            hash = hash ^ n
            hash = hash * 0x100000001b3  # FNV 64-bit prime
        # note: xor-then-multiply is strictly the FNV-1a variant's order
        return hash
def djb2(self, key):
"""
DJB2 32-bit hash function
Implement this, and/or FNV-1.
"""
        hash = 5381
        for n in key.encode():
            hash = hash * 33 + n  # equivalent to ((hash << 5) + hash) + n
        return hash
def hash_index(self, key):
"""
Take an arbitrary key and return a valid integer index
between within the storage capacity of the hash table.
"""
# return self.fnv1(key) % self.capacity
return self.djb2(key) % self.capacity
def put(self, key, value):
"""
Store the value with the given key.
Hash collisions should be handled with Linked List Chaining.
Implement this.
"""
hi = self.hash_index(key)
if self.storage[hi]:
current = self.storage[hi]
while current.next and current.key != key:
current = current.next
if current.key == key:
current.value = value
else:
current.next = HashTableEntry(key, value)
self.numberOfItems += 1
else:
self.storage[hi] = HashTableEntry(key, value)
self.numberOfItems += 1
self.calculateLoad()
def delete(self, key):
"""
Remove the value stored with the given key.
Print a warning if the key is not found.
Implement this.
"""
hi = self.hash_index(key)
# if that hi is empty ignore
# if self.storage[hi] is None:
# print("WARNING: no key")
# return
current = self.storage[hi]
prev = self.storage[hi]
while current and current.key != key:
prev = current
current = current.next
if (current and current.key == key):
# if its the first link in the list
if (current == self.storage[hi]):
self.storage[hi] = current.next
else:
prev.next = current.next
self.numberOfItems -= 1
else:
print("WARNING: no key")
self.calculateLoad()
    def get(self, key):
        """
        Retrieve the value stored with the given key.

        Returns None if the key is not found.

        Implement this.
        """
        # walk the chain at this index, comparing keys explicitly so that a
        # collision (or a missing key) can never return another entry's value
        hi = self.hash_index(key)
        current = self.storage[hi]
        while current:
            if current.key == key:
                return current.value
            current = current.next
        return None
def resize(self, factor=2):
"""
Doubles the capacity of the hash table and
rehash all key/value pairs.
Implement this.
"""
self.capacity = round(self.capacity*factor)
newarr = [None] * self.capacity
for i, v in enumerate(self.storage):
while v:
hi = self.hash_index(v.key)
if newarr[hi]:
current = newarr[hi]
while current.next:
current = current.next
current.next = HashTableEntry(v.key, v.value)
else:
newarr[hi] = HashTableEntry(v.key, v.value)
v = v.next
self.storage = newarr
# Solution 2 - Much cleaner
# newHashTable = HashTable(round(self.capacity*factor))
# for i, v in enumerate(self.storage):
# while v:
# newHashTable.put(v.key, v.value)
# v = v.next
# self.capacity = newHashTable.capacity
# self.storage = newHashTable.storage
    def calculateLoad(self):
        load = self.numberOfItems / len(self.storage)
        # comment out the automatic resizing below to pass the fixed-capacity tests
        if load > 0.7:
            self.resize(2)
        elif load < 0.2 and self.capacity > 8:
            # the capacity floor (8 here, an arbitrary safeguard) keeps repeated
            # halving from rounding the capacity down to zero
            self.resize(0.5)
if __name__ == "__main__":
ht = HashTable(2)
ht.put("line_1", "111")
ht.put("line_2", "222")
ht.put("line_3", "333")
ht.put("line_4", "sss")
ht.put("line_5", "ddd")
ht.put("line_6", "ggg")
ht.put("line_7", "hhh")
ht.put("line_12", "jjj")
print("")
# Test storing beyond capacity
# print(ht.get("line_1"))
# print(ht.get("line_2"))
# print(ht.get("line_3"))
# print(ht.get("line_4"))
# print(ht.get("line_5"))
# print(ht.get("line_6"))
# print(ht.get("line_7"))
# Test resizing
old_capacity = len(ht.storage)
ht.resize()
new_capacity = len(ht.storage)
print(f"\nResized from {old_capacity} to {new_capacity}.\n")
# print("1: ", ht.storage[1].value)
# print("1: ", ht.storage[1].next.value)
# print("3: ", ht.storage[3].value)
# print("3: ", ht.storage[3].next.value)
# print("3: ", ht.storage[3].next.next.value)
print("")
for i, v in enumerate(ht.storage):
while v:
print(i, v.value)
v = v.next
print("")
ht.delete("line_3")
print("")
for i, v in enumerate(ht.storage):
while v:
print(i, v.value)
v = v.next
print("")
# Test if data intact after resizing
# print(ht.get("line_1"))
# print(ht.get("line_2"))
# print(ht.get("line_3"))
# print(ht.get("line_4"))
# print(ht.get("line_5"))
# print(ht.get("line_6"))
# print(ht.get("line_7"))
print("")
|
normal
|
{
"blob_id": "7e58fe636e6d835d7857a49900bbc127b52f63d9",
"index": 6112,
"step-1": "<mask token>\n\n\nclass HashTable:\n <mask token>\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.storage = [None] * capacity\n self.numberOfItems = 0\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 64-bit hash function\n\n Implement this, and/or DJB2.\n \"\"\"\n hash = 14695981039346656037\n for n in key.encode():\n hash = hash ^ n\n hash = hash * 1099511628211\n return hash\n\n def djb2(self, key):\n \"\"\"\n DJB2 32-bit hash function\n\n Implement this, and/or FNV-1.\n \"\"\"\n hash = 5381\n for n in key.encode():\n hash = hash * 33 + n\n return hash\n <mask token>\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n if current.key == key:\n current.value = value\n else:\n current.next = HashTableEntry(key, value)\n self.numberOfItems += 1\n else:\n self.storage[hi] = HashTableEntry(key, value)\n self.numberOfItems += 1\n self.calculateLoad()\n <mask token>\n <mask token>\n\n def resize(self, factor=2):\n \"\"\"\n Doubles the capacity of the hash table and\n rehash all key/value pairs.\n\n Implement this.\n \"\"\"\n self.capacity = round(self.capacity * factor)\n newarr = [None] * self.capacity\n for i, v in enumerate(self.storage):\n while v:\n hi = self.hash_index(v.key)\n if newarr[hi]:\n current = newarr[hi]\n while current.next:\n current = current.next\n current.next = HashTableEntry(v.key, v.value)\n else:\n newarr[hi] = HashTableEntry(v.key, v.value)\n v = v.next\n self.storage = newarr\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass HashTable:\n \"\"\"\n A hash table that with `capacity` buckets\n that accepts string keys\n\n Implement this.\n \"\"\"\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.storage = [None] * capacity\n self.numberOfItems = 0\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 64-bit hash function\n\n Implement this, and/or DJB2.\n \"\"\"\n hash = 14695981039346656037\n for n in key.encode():\n hash = hash ^ n\n hash = hash * 1099511628211\n return hash\n\n def djb2(self, key):\n \"\"\"\n DJB2 32-bit hash function\n\n Implement this, and/or FNV-1.\n \"\"\"\n hash = 5381\n for n in key.encode():\n hash = hash * 33 + n\n return hash\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n return self.djb2(key) % self.capacity\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n if current.key == key:\n current.value = value\n else:\n current.next = HashTableEntry(key, value)\n self.numberOfItems += 1\n else:\n self.storage[hi] = HashTableEntry(key, value)\n self.numberOfItems += 1\n self.calculateLoad()\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n current = self.storage[hi]\n prev = self.storage[hi]\n while current and current.key != key:\n prev = current\n current = current.next\n if current and current.key == key:\n if current == self.storage[hi]:\n self.storage[hi] = current.next\n else:\n prev.next = current.next\n self.numberOfItems -= 1\n else:\n print('WARNING: no key')\n self.calculateLoad()\n\n def get(self, key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n if self.storage[hi].next:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n return current.value\n else:\n return self.storage[hi].value\n return None\n\n def resize(self, factor=2):\n \"\"\"\n Doubles the capacity of the hash table and\n rehash all key/value pairs.\n\n Implement this.\n \"\"\"\n self.capacity = round(self.capacity * factor)\n newarr = [None] * self.capacity\n for i, v in enumerate(self.storage):\n while v:\n hi = self.hash_index(v.key)\n if newarr[hi]:\n current = newarr[hi]\n while current.next:\n current = current.next\n current.next = HashTableEntry(v.key, v.value)\n else:\n newarr[hi] = HashTableEntry(v.key, v.value)\n v = v.next\n self.storage = newarr\n\n def calculateLoad(self):\n load = self.numberOfItems / len(self.storage)\n if load > 0.7:\n self.resize(2)\n elif load < 0.2:\n self.resize(0.5)\n pass\n\n\n<mask token>\n",
"step-3": "class HashTableEntry:\n <mask token>\n\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.next = None\n\n\nclass HashTable:\n \"\"\"\n A hash table that with `capacity` buckets\n that accepts string keys\n\n Implement this.\n \"\"\"\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.storage = [None] * capacity\n self.numberOfItems = 0\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 64-bit hash function\n\n Implement this, and/or DJB2.\n \"\"\"\n hash = 14695981039346656037\n for n in key.encode():\n hash = hash ^ n\n hash = hash * 1099511628211\n return hash\n\n def djb2(self, key):\n \"\"\"\n DJB2 32-bit hash function\n\n Implement this, and/or FNV-1.\n \"\"\"\n hash = 5381\n for n in key.encode():\n hash = hash * 33 + n\n return hash\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n return self.djb2(key) % self.capacity\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n if current.key == key:\n current.value = value\n else:\n current.next = HashTableEntry(key, value)\n self.numberOfItems += 1\n else:\n self.storage[hi] = HashTableEntry(key, value)\n self.numberOfItems += 1\n self.calculateLoad()\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n current = self.storage[hi]\n prev = self.storage[hi]\n while current and current.key != key:\n prev = current\n current = current.next\n if current and current.key == key:\n if current == self.storage[hi]:\n self.storage[hi] = current.next\n else:\n prev.next = current.next\n self.numberOfItems -= 1\n else:\n print('WARNING: no key')\n self.calculateLoad()\n\n def get(self, key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n if self.storage[hi].next:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n return current.value\n else:\n return self.storage[hi].value\n return None\n\n def resize(self, factor=2):\n \"\"\"\n Doubles the capacity of the hash table and\n rehash all key/value pairs.\n\n Implement this.\n \"\"\"\n self.capacity = round(self.capacity * factor)\n newarr = [None] * self.capacity\n for i, v in enumerate(self.storage):\n while v:\n hi = self.hash_index(v.key)\n if newarr[hi]:\n current = newarr[hi]\n while current.next:\n current = current.next\n current.next = HashTableEntry(v.key, v.value)\n else:\n newarr[hi] = HashTableEntry(v.key, v.value)\n v = v.next\n self.storage = newarr\n\n def calculateLoad(self):\n load = self.numberOfItems / len(self.storage)\n if load > 0.7:\n self.resize(2)\n elif load < 0.2:\n self.resize(0.5)\n pass\n\n\n<mask token>\n",
"step-4": "class HashTableEntry:\n \"\"\"\n Hash Table entry, as a linked list node.\n \"\"\"\n\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.next = None\n\n\nclass HashTable:\n \"\"\"\n A hash table that with `capacity` buckets\n that accepts string keys\n\n Implement this.\n \"\"\"\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.storage = [None] * capacity\n self.numberOfItems = 0\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 64-bit hash function\n\n Implement this, and/or DJB2.\n \"\"\"\n hash = 14695981039346656037\n for n in key.encode():\n hash = hash ^ n\n hash = hash * 1099511628211\n return hash\n\n def djb2(self, key):\n \"\"\"\n DJB2 32-bit hash function\n\n Implement this, and/or FNV-1.\n \"\"\"\n hash = 5381\n for n in key.encode():\n hash = hash * 33 + n\n return hash\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n return self.djb2(key) % self.capacity\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n if current.key == key:\n current.value = value\n else:\n current.next = HashTableEntry(key, value)\n self.numberOfItems += 1\n else:\n self.storage[hi] = HashTableEntry(key, value)\n self.numberOfItems += 1\n self.calculateLoad()\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n current = self.storage[hi]\n prev = self.storage[hi]\n while current and current.key != key:\n prev = current\n current = current.next\n if current and current.key == key:\n if current == self.storage[hi]:\n self.storage[hi] = current.next\n else:\n prev.next = current.next\n self.numberOfItems -= 1\n else:\n print('WARNING: no key')\n self.calculateLoad()\n\n def get(self, key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n if self.storage[hi].next:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n return current.value\n else:\n return self.storage[hi].value\n return None\n\n def resize(self, factor=2):\n \"\"\"\n Doubles the capacity of the hash table and\n rehash all key/value pairs.\n\n Implement this.\n \"\"\"\n self.capacity = round(self.capacity * factor)\n newarr = [None] * self.capacity\n for i, v in enumerate(self.storage):\n while v:\n hi = self.hash_index(v.key)\n if newarr[hi]:\n current = newarr[hi]\n while current.next:\n current = current.next\n current.next = HashTableEntry(v.key, v.value)\n else:\n newarr[hi] = HashTableEntry(v.key, v.value)\n v = v.next\n self.storage = newarr\n\n def calculateLoad(self):\n load = self.numberOfItems / len(self.storage)\n if load > 0.7:\n self.resize(2)\n elif load < 0.2:\n self.resize(0.5)\n pass\n\n\nif __name__ == '__main__':\n ht = HashTable(2)\n ht.put('line_1', '111')\n ht.put('line_2', '222')\n ht.put('line_3', '333')\n ht.put('line_4', 'sss')\n ht.put('line_5', 'ddd')\n ht.put('line_6', 'ggg')\n ht.put('line_7', 'hhh')\n ht.put('line_12', 'jjj')\n print('')\n old_capacity = len(ht.storage)\n 
ht.resize()\n new_capacity = len(ht.storage)\n print(f'\\nResized from {old_capacity} to {new_capacity}.\\n')\n print('')\n for i, v in enumerate(ht.storage):\n while v:\n print(i, v.value)\n v = v.next\n print('')\n ht.delete('line_3')\n print('')\n for i, v in enumerate(ht.storage):\n while v:\n print(i, v.value)\n v = v.next\n print('')\n print('')\n",
"step-5": "class HashTableEntry:\n \"\"\"\n Hash Table entry, as a linked list node.\n \"\"\"\n\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.next = None\n\n\nclass HashTable:\n \"\"\"\n A hash table that with `capacity` buckets\n that accepts string keys\n\n Implement this.\n \"\"\"\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.storage = [None] * capacity\n self.numberOfItems = 0\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 64-bit hash function\n\n Implement this, and/or DJB2.\n \"\"\"\n # hash = 0xff\n hash = 0xcbf29ce484222325\n for n in key.encode():\n # print(n)\n hash = hash ^ n\n hash = hash * 0x100000001b3\n\n # print(hash)\n return hash\n\n def djb2(self, key):\n \"\"\"\n DJB2 32-bit hash function\n\n Implement this, and/or FNV-1.\n \"\"\"\n\n hash = 5381\n for n in key.encode():\n # hash = ((hash << 5) + hash) + n\n hash = hash * 33 + n\n\n return hash\n # return hash & 0xFFFFFFFF\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n # return self.fnv1(key) % self.capacity\n return self.djb2(key) % self.capacity\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n\n if current.key == key:\n current.value = value\n else:\n current.next = HashTableEntry(key, value)\n self.numberOfItems += 1\n else:\n self.storage[hi] = HashTableEntry(key, value)\n self.numberOfItems += 1\n\n self.calculateLoad()\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n\n hi = self.hash_index(key)\n\n # if that hi is empty ignore\n # if self.storage[hi] is None:\n # print(\"WARNING: no key\")\n # return\n\n current = self.storage[hi]\n prev = self.storage[hi]\n while current and current.key != key:\n prev = current\n current = current.next\n\n if (current and current.key == key):\n # if its the first link in the list\n if (current == self.storage[hi]):\n self.storage[hi] = current.next\n else:\n prev.next = current.next\n\n self.numberOfItems -= 1\n else:\n print(\"WARNING: no key\")\n\n self.calculateLoad()\n\n def get(self, key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if (self.storage[hi]):\n if(self.storage[hi].next):\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n return current.value\n else:\n return self.storage[hi].value\n\n return None\n\n def resize(self, factor=2):\n \"\"\"\n Doubles the capacity of the hash table and\n rehash all key/value pairs.\n\n Implement this.\n \"\"\"\n self.capacity = round(self.capacity*factor)\n newarr = [None] * self.capacity\n\n for i, v in enumerate(self.storage):\n while v:\n hi = self.hash_index(v.key)\n if newarr[hi]:\n current = newarr[hi]\n while current.next:\n current = current.next\n\n current.next = HashTableEntry(v.key, v.value)\n else:\n newarr[hi] = HashTableEntry(v.key, v.value)\n\n v = v.next\n\n self.storage = newarr\n\n # Solution 2 - Much cleaner\n # newHashTable = HashTable(round(self.capacity*factor))\n # for i, v in enumerate(self.storage):\n # 
while v:\n # newHashTable.put(v.key, v.value)\n # v = v.next\n\n # self.capacity = newHashTable.capacity\n # self.storage = newHashTable.storage\n\n def calculateLoad(self):\n load = self.numberOfItems/len(self.storage)\n\n # print(\"Items:\\t\", ht.numberOfItems)\n # print(\"Storage:\", len(ht.storage))\n # print(\"LOAD:\\t\", load)\n\n # comment code bellow to pass tests\n if load > 0.7:\n self.resize(2)\n elif load < 0.2:\n self.resize(0.5)\n\n pass\n\n\nif __name__ == \"__main__\":\n ht = HashTable(2)\n\n ht.put(\"line_1\", \"111\")\n ht.put(\"line_2\", \"222\")\n ht.put(\"line_3\", \"333\")\n ht.put(\"line_4\", \"sss\")\n ht.put(\"line_5\", \"ddd\")\n ht.put(\"line_6\", \"ggg\")\n ht.put(\"line_7\", \"hhh\")\n ht.put(\"line_12\", \"jjj\")\n\n print(\"\")\n\n # Test storing beyond capacity\n # print(ht.get(\"line_1\"))\n # print(ht.get(\"line_2\"))\n # print(ht.get(\"line_3\"))\n # print(ht.get(\"line_4\"))\n # print(ht.get(\"line_5\"))\n # print(ht.get(\"line_6\"))\n # print(ht.get(\"line_7\"))\n\n # Test resizing\n old_capacity = len(ht.storage)\n ht.resize()\n new_capacity = len(ht.storage)\n\n print(f\"\\nResized from {old_capacity} to {new_capacity}.\\n\")\n\n # print(\"1: \", ht.storage[1].value)\n # print(\"1: \", ht.storage[1].next.value)\n\n # print(\"3: \", ht.storage[3].value)\n # print(\"3: \", ht.storage[3].next.value)\n # print(\"3: \", ht.storage[3].next.next.value)\n\n print(\"\")\n for i, v in enumerate(ht.storage):\n while v:\n print(i, v.value)\n v = v.next\n print(\"\")\n ht.delete(\"line_3\")\n print(\"\")\n for i, v in enumerate(ht.storage):\n while v:\n print(i, v.value)\n v = v.next\n print(\"\")\n\n # Test if data intact after resizing\n # print(ht.get(\"line_1\"))\n # print(ht.get(\"line_2\"))\n # print(ht.get(\"line_3\"))\n # print(ht.get(\"line_4\"))\n # print(ht.get(\"line_5\"))\n # print(ht.get(\"line_6\"))\n # print(ht.get(\"line_7\"))\n\n print(\"\")\n",
"step-ids": [
6,
11,
13,
15,
16
]
}
|
[
6,
11,
13,
15,
16
] |
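A quick behavioral check of the table above; the keys and values are illustrative:

ht = HashTable(8)
ht.put("apple", 1)
ht.put("apple", 2)                  # same key overwrites in place
assert ht.get("apple") == 2
assert ht.get("missing") is None    # relies on the key check in the corrected get()
ht.delete("apple")
assert ht.get("apple") is None      # deleted keys are gone
print("all checks passed")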
def longest(s1, s2):
    # concatenate, deduplicate, sort, and rejoin into a single string
    s = s1 + s2
    st = "".join(sorted(set(s)))
    return st


longest("xyaabbbccccdefww", "xxxxyyyyabklmopq")
|
normal
|
{
"blob_id": "7d54d5fd855c7c03d2d4739e8ad4f9ab8772ca2b",
"index": 3977,
"step-1": "<mask token>\n",
"step-2": "def longest(s1, s2):\n s = s1 + s2\n st = ''.join(sorted(set(s)))\n return st\n\n\n<mask token>\n",
"step-3": "def longest(s1, s2):\n s = s1 + s2\n st = ''.join(sorted(set(s)))\n return st\n\n\nlongest('xyaabbbccccdefww', 'xxxxyyyyabklmopq')\n",
"step-4": "def longest(s1, s2):\n # your code\n s=s1+s2\n st=\"\".join(sorted(set(s))) \n return st\n \n \nlongest(\"xyaabbbccccdefww\",\"xxxxyyyyabklmopq\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
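Expected behavior, checked against a known example:

print(longest("aretheyhere", "yestheyarehere"))  # -> "aehrsty"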
import shelve
arguments = ["self", "info", "args", "world"]
minlevel = 2
helpstring = "moneyreset"

def main(connection, info, args, world):
    """Resets a user's money data to the defaults"""
    money = shelve.open("money-%s.db" % (world.hostnicks[connection.host]), writeback=True)
    money[info["sender"]] = {"money":100000, "maxmoney":100000, "items":[], "coinchance":[True for x in range(50)] + [False for x in range(50)]}
    money.sync()
    money.close()  # release the shelve file handle after syncing
    connection.ircsend(info["channel"], "%s: Your money data has been reset." % (info["sender"]))
|
normal
|
{
"blob_id": "95021cc01c0b85b512fd466797d4d128472773c3",
"index": 2943,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(connection, info, args, world):\n \"\"\"Resets a users money\"\"\"\n money = shelve.open('money-%s.db' % world.hostnicks[connection.host],\n writeback=True)\n money[info['sender']] = {'money': 100000, 'maxmoney': 100000, 'items':\n [], 'coinchance': [(True) for x in range(50)] + [(False) for x in\n range(50)]}\n money.sync()\n connection.ircsend(info['channel'], \n '%s: Your money data has been reset.' % info['sender'])\n",
"step-3": "<mask token>\narguments = ['self', 'info', 'args', 'world']\nminlevel = 2\nhelpstring = 'moneyreset'\n\n\ndef main(connection, info, args, world):\n \"\"\"Resets a users money\"\"\"\n money = shelve.open('money-%s.db' % world.hostnicks[connection.host],\n writeback=True)\n money[info['sender']] = {'money': 100000, 'maxmoney': 100000, 'items':\n [], 'coinchance': [(True) for x in range(50)] + [(False) for x in\n range(50)]}\n money.sync()\n connection.ircsend(info['channel'], \n '%s: Your money data has been reset.' % info['sender'])\n",
"step-4": "import shelve\narguments = ['self', 'info', 'args', 'world']\nminlevel = 2\nhelpstring = 'moneyreset'\n\n\ndef main(connection, info, args, world):\n \"\"\"Resets a users money\"\"\"\n money = shelve.open('money-%s.db' % world.hostnicks[connection.host],\n writeback=True)\n money[info['sender']] = {'money': 100000, 'maxmoney': 100000, 'items':\n [], 'coinchance': [(True) for x in range(50)] + [(False) for x in\n range(50)]}\n money.sync()\n connection.ircsend(info['channel'], \n '%s: Your money data has been reset.' % info['sender'])\n",
"step-5": "import shelve\narguments = [\"self\", \"info\", \"args\", \"world\"]\nminlevel = 2\nhelpstring = \"moneyreset\"\n\ndef main(connection, info, args, world) :\n \"\"\"Resets a users money\"\"\"\n money = shelve.open(\"money-%s.db\" % (world.hostnicks[connection.host]), writeback=True)\n money[info[\"sender\"]] = {\"money\":100000, \"maxmoney\":100000, \"items\":[], \"coinchance\":[True for x in range(50)] + [False for x in range(50)]}\n money.sync()\n connection.ircsend(info[\"channel\"], \"%s: Your money data has been reset.\" % (info[\"sender\"]))\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
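A hypothetical harness for exercising main() outside the IRC bot; the stub connection and world objects are assumptions about the plugin interface:

class FakeConnection:
    host = "irc.example.net"
    def ircsend(self, channel, message):
        print(channel, message)

class FakeWorld:
    hostnicks = {"irc.example.net": "testnet"}

main(FakeConnection(), {"sender": "alice", "channel": "#test"}, [], FakeWorld())
# writes money-testnet.db and reports the reset in #test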
from flask import Blueprint, request, make_response
from untils import restful, cacheuntil
from untils.captcha import Captcha
from exts import smsapi
from .forms import SMSCaptchaForm
from io import BytesIO

bp = Blueprint('common', __name__, url_prefix='/c')


# @bp.route('/sms_captcha/', methods=['post'])
# def sms_captcha():
#     telephone = request.form.get('telephone')
#     if not telephone:
#         return restful.params_error(message='Please supply a phone number!')
#     code = Captcha.gene_text(number=4)  # TODO: generate a random 4-digit code string
#     resp = smsapi.send_sms(telephone=telephone, param=code)
#     if resp:
#         return restful.success(message='SMS verification code sent!')
#     else:
#         return restful.params_error(message='Failed to send the SMS verification code!')


# TODO: send the SMS verification code
@bp.route('/sms_captcha/', methods=['post'])
def sms_captcha():
    form = SMSCaptchaForm(request.form)
    if form.validate():
        telephone = form.telephone.data  # TODO: read the phone number
        code = Captcha.gene_text(number=4)  # TODO: generate a random 4-digit code string
        resp = smsapi.send_sms(telephone=telephone, param=code)
        if resp:
            cacheuntil.set(telephone, code)  # TODO: cache the SMS code in redis
            return restful.success(message='SMS verification code sent!')
        else:
            return restful.params_error(message='Failed to send the SMS verification code!')
    else:
        return restful.params_error(message=form.get_random_error(), data=form.get_all_errors())


# TODO: graphical captcha view
@bp.route('/captcha/')
def CaptchaView():
    text, image = Captcha.gene_graph_captcha()
    cacheuntil.set(text.lower(), text.lower())  # TODO: cache the captcha text in redis
    out = BytesIO()
    # save the image into the BytesIO buffer in png format
    image.save(out, 'png')
    # after saving, move the pointer back to the start
    out.seek(0)
    # read the buffer out and return it as an image/png response
    resp = make_response(out.read())
    resp.content_type = 'image/png'
    return resp
flexible
|
{
"blob_id": "856beaf3b9dad333d5b48c1be3a8ad917f8d020c",
"index": 3634,
"step-1": "<mask token>\n\n\[email protected]('/captcha/')\ndef CaptchaView():\n text, image = Captcha.gene_graph_captcha()\n cacheuntil.set(text.lower(), text.lower())\n out = BytesIO()\n image.save(out, 'png')\n out.seek(0)\n resp = make_response(out.read())\n resp.content_type = 'image/png'\n return resp\n",
"step-2": "<mask token>\n\n\[email protected]('/sms_captcha/', methods=['post'])\ndef sms_captcha():\n form = SMSCaptchaForm(request.form)\n if form.validate():\n telephone = form.telephone.data\n code = Captcha.gene_text(number=4)\n resp = smsapi.send_sms(telephone=telephone, param=code)\n if resp:\n cacheuntil.set(telephone, code)\n return restful.success(message='短信验证码发送成功!')\n else:\n return restful.params_error(message='短信验证码发送失败!')\n else:\n return restful.params_error(message=form.get_random_error(), data=\n form.get_all_errors())\n\n\[email protected]('/captcha/')\ndef CaptchaView():\n text, image = Captcha.gene_graph_captcha()\n cacheuntil.set(text.lower(), text.lower())\n out = BytesIO()\n image.save(out, 'png')\n out.seek(0)\n resp = make_response(out.read())\n resp.content_type = 'image/png'\n return resp\n",
"step-3": "<mask token>\nbp = Blueprint('common', __name__, url_prefix='/c')\n\n\[email protected]('/sms_captcha/', methods=['post'])\ndef sms_captcha():\n form = SMSCaptchaForm(request.form)\n if form.validate():\n telephone = form.telephone.data\n code = Captcha.gene_text(number=4)\n resp = smsapi.send_sms(telephone=telephone, param=code)\n if resp:\n cacheuntil.set(telephone, code)\n return restful.success(message='短信验证码发送成功!')\n else:\n return restful.params_error(message='短信验证码发送失败!')\n else:\n return restful.params_error(message=form.get_random_error(), data=\n form.get_all_errors())\n\n\[email protected]('/captcha/')\ndef CaptchaView():\n text, image = Captcha.gene_graph_captcha()\n cacheuntil.set(text.lower(), text.lower())\n out = BytesIO()\n image.save(out, 'png')\n out.seek(0)\n resp = make_response(out.read())\n resp.content_type = 'image/png'\n return resp\n",
"step-4": "from flask import Blueprint, request, make_response\nfrom untils import restful, cacheuntil\nfrom untils.captcha import Captcha\nfrom exts import smsapi\nfrom .forms import SMSCaptchaForm\nfrom io import BytesIO\nbp = Blueprint('common', __name__, url_prefix='/c')\n\n\[email protected]('/sms_captcha/', methods=['post'])\ndef sms_captcha():\n form = SMSCaptchaForm(request.form)\n if form.validate():\n telephone = form.telephone.data\n code = Captcha.gene_text(number=4)\n resp = smsapi.send_sms(telephone=telephone, param=code)\n if resp:\n cacheuntil.set(telephone, code)\n return restful.success(message='短信验证码发送成功!')\n else:\n return restful.params_error(message='短信验证码发送失败!')\n else:\n return restful.params_error(message=form.get_random_error(), data=\n form.get_all_errors())\n\n\[email protected]('/captcha/')\ndef CaptchaView():\n text, image = Captcha.gene_graph_captcha()\n cacheuntil.set(text.lower(), text.lower())\n out = BytesIO()\n image.save(out, 'png')\n out.seek(0)\n resp = make_response(out.read())\n resp.content_type = 'image/png'\n return resp\n",
"step-5": "from flask import Blueprint, request, make_response\nfrom untils import restful, cacheuntil\nfrom untils.captcha import Captcha\nfrom exts import smsapi\nfrom .forms import SMSCaptchaForm\nfrom io import BytesIO\n\nbp = Blueprint('common', __name__, url_prefix='/c')\n\n\n# @bp.route('/sms_captcha/', methods=['post'])\n# def sms_captcha():\n# telephone = request.form.get('telephone')\n# if not telephone:\n# return restful.params_error(message='请传入手机号码!')\n# code = Captcha.gene_text(number=4) # TODO: 获取随机4位数字字符串\n# resp = smsapi.send_sms(telephone=telephone, param=code)\n# if resp:\n# return restful.success(message='短信验证码发送成功!')\n# else:\n# return restful.params_error(message='短信验证码发送失败!')\n\n\n# TODO: 发送短信验证码\[email protected]('/sms_captcha/', methods=['post'])\ndef sms_captcha():\n form = SMSCaptchaForm(request.form)\n if form.validate():\n telephone = form.telephone.data # TODO: 获取手机号\n code = Captcha.gene_text(number=4) # TODO: 获取随机4位数字字符串\n resp = smsapi.send_sms(telephone=telephone, param=code)\n if resp:\n cacheuntil.set(telephone, code) # TODO: redis存储短信验证码\n return restful.success(message='短信验证码发送成功!')\n else:\n return restful.params_error(message='短信验证码发送失败!')\n else:\n return restful.params_error(message=form.get_random_error(), data=form.get_all_errors())\n\n\n# TODO: 图形验证码视图\[email protected]('/captcha/')\ndef CaptchaView():\n text, image = Captcha.gene_graph_captcha()\n cacheuntil.set(text.lower(), text.lower()) # TODO: redis存储图片验证码\n out = BytesIO()\n # TODO: 将图片保存到IO中格式png\n image.save(out, 'png')\n # TODO: 保存完毕后,移动指针到起始位置\n out.seek(0)\n # TODO: 将IO读取出来转为image/png响应\n resp = make_response(out.read())\n resp.content_type = 'image/png'\n return resp\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
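
A standalone sketch of the store/verify round trip implied by CaptchaView above. A plain dict stands in for the redis-backed cacheuntil module, and the single-use pop is an added assumption; the record only shows the set call.

# Dict stands in for cacheuntil; CaptchaView keys the lowercased captcha
# text by itself, so a lookup hit means the user typed it correctly.
captcha_cache = {}

def store_captcha(text):
    captcha_cache[text.lower()] = text.lower()

def verify_captcha(user_input):
    # pop makes the code single-use, an assumption beyond the record
    return captcha_cache.pop(user_input.lower(), None) is not None

store_captcha('AbC9')
assert verify_captcha('abc9')      # case-insensitive match
assert not verify_captcha('abc9')  # already consumed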
<|reserved_special_token_0|>
class Knn(object):
<|reserved_special_token_0|>
def __init__(self, TrainingData):
self.TrainingData = TrainingData
self.nFeatures = self.TrainingData.shape[1] - 1
self.data = TrainingData[:, 0:self.nFeatures].astype(float)
self.FeatureRange = []
self.normalize()
def normalize(self, weights=None):
if weights == None:
weights = np.ones(self.nFeatures)
for i in range(self.nFeatures):
mn = np.min(self.data[:, i])
self.data[:, i] -= mn
mx = np.max(self.data[:, i])
self.data[:, i] /= mx
self.FeatureRange.append([mn, mx])
def Check(self, pnt):
for i in range(self.nFeatures):
pnt[i] -= self.FeatureRange[i][0]
pnt[i] /= self.FeatureRange[i][1]
distances = []
for i in range(len(self.data)):
dist = np.linalg.norm(pnt - self.data[i])
distances.append(dist)
order = np.argsort(distances)
c = Counter(self.TrainingData[:, self.nFeatures][order][0:7])
ans = c.most_common(3)
print(ans[0][0])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Knn(object):
"""docstring for data"""
def __init__(self, TrainingData):
self.TrainingData = TrainingData
self.nFeatures = self.TrainingData.shape[1] - 1
self.data = TrainingData[:, 0:self.nFeatures].astype(float)
self.FeatureRange = []
self.normalize()
def normalize(self, weights=None):
if weights == None:
weights = np.ones(self.nFeatures)
for i in range(self.nFeatures):
mn = np.min(self.data[:, i])
self.data[:, i] -= mn
mx = np.max(self.data[:, i])
self.data[:, i] /= mx
self.FeatureRange.append([mn, mx])
def Check(self, pnt):
for i in range(self.nFeatures):
pnt[i] -= self.FeatureRange[i][0]
pnt[i] /= self.FeatureRange[i][1]
distances = []
for i in range(len(self.data)):
dist = np.linalg.norm(pnt - self.data[i])
distances.append(dist)
order = np.argsort(distances)
c = Counter(self.TrainingData[:, self.nFeatures][order][0:7])
ans = c.most_common(3)
print(ans[0][0])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TrainingData = np.loadtxt('Data2', delimiter=',', skiprows=1, dtype=str)
class Knn(object):
"""docstring for data"""
def __init__(self, TrainingData):
self.TrainingData = TrainingData
self.nFeatures = self.TrainingData.shape[1] - 1
self.data = TrainingData[:, 0:self.nFeatures].astype(float)
self.FeatureRange = []
self.normalize()
def normalize(self, weights=None):
if weights == None:
weights = np.ones(self.nFeatures)
for i in range(self.nFeatures):
mn = np.min(self.data[:, i])
self.data[:, i] -= mn
mx = np.max(self.data[:, i])
self.data[:, i] /= mx
self.FeatureRange.append([mn, mx])
def Check(self, pnt):
for i in range(self.nFeatures):
pnt[i] -= self.FeatureRange[i][0]
pnt[i] /= self.FeatureRange[i][1]
distances = []
for i in range(len(self.data)):
dist = np.linalg.norm(pnt - self.data[i])
distances.append(dist)
order = np.argsort(distances)
c = Counter(self.TrainingData[:, self.nFeatures][order][0:7])
ans = c.most_common(3)
print(ans[0][0])
boop = Knn(TrainingData)
pnt = np.array([7.0, 3.2, 4.7, 1.85])
boop.Check(pnt)
<|reserved_special_token_1|>
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
TrainingData = np.loadtxt('Data2', delimiter=',', skiprows=1, dtype=str)
class Knn(object):
"""docstring for data"""
def __init__(self, TrainingData):
self.TrainingData = TrainingData
self.nFeatures = self.TrainingData.shape[1] - 1
self.data = TrainingData[:, 0:self.nFeatures].astype(float)
self.FeatureRange = []
self.normalize()
def normalize(self, weights=None):
if weights == None:
weights = np.ones(self.nFeatures)
for i in range(self.nFeatures):
mn = np.min(self.data[:, i])
self.data[:, i] -= mn
mx = np.max(self.data[:, i])
self.data[:, i] /= mx
self.FeatureRange.append([mn, mx])
def Check(self, pnt):
for i in range(self.nFeatures):
pnt[i] -= self.FeatureRange[i][0]
pnt[i] /= self.FeatureRange[i][1]
distances = []
for i in range(len(self.data)):
dist = np.linalg.norm(pnt - self.data[i])
distances.append(dist)
order = np.argsort(distances)
c = Counter(self.TrainingData[:, self.nFeatures][order][0:7])
ans = c.most_common(3)
print(ans[0][0])
boop = Knn(TrainingData)
pnt = np.array([7.0, 3.2, 4.7, 1.85])
boop.Check(pnt)
<|reserved_special_token_1|>
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
# 1. sepal length in cm
# 2. sepal width in cm
# 3. petal length in cm
# 4. petal width in cm
TrainingData = np.loadtxt("Data2",delimiter = ',',skiprows = 1,dtype = str)
class Knn(object):
"""docstring for data"""
def __init__(self, TrainingData):
self.TrainingData = TrainingData
self.nFeatures = self.TrainingData.shape[1]-1
self.data = TrainingData[:,0:self.nFeatures].astype(float)
self.FeatureRange = []
self.normalize()
def normalize(self,weights = None):
if weights == None:
weights = np.ones(self.nFeatures)
for i in range(self.nFeatures):
mn = np.min(self.data[:,i])
self.data[:,i] -= mn
mx = np.max(self.data[:,i])
self.data[:,i] /= mx
self.FeatureRange.append([mn,mx])
def Check(self,pnt):
for i in range(self.nFeatures):
pnt[i] -= self.FeatureRange[i][0]
pnt[i] /= self.FeatureRange[i][1]
distances = []
for i in range(len(self.data)):
dist = np.linalg.norm(pnt-self.data[i])
distances.append(dist)
order = np.argsort(distances)
c = Counter(self.TrainingData[:,self.nFeatures][order][0:7])
ans = c.most_common(3)
print(ans[0][0])
boop = Knn(TrainingData)
pnt = np.array([7.0,3.2,4.7,1.85])
boop.Check(pnt)
|
flexible
|
{
"blob_id": "5e0affbd295d7237784cd8e72926afeda6456500",
"index": 7080,
"step-1": "<mask token>\n\n\nclass Knn(object):\n <mask token>\n\n def __init__(self, TrainingData):\n self.TrainingData = TrainingData\n self.nFeatures = self.TrainingData.shape[1] - 1\n self.data = TrainingData[:, 0:self.nFeatures].astype(float)\n self.FeatureRange = []\n self.normalize()\n\n def normalize(self, weights=None):\n if weights == None:\n weights = np.ones(self.nFeatures)\n for i in range(self.nFeatures):\n mn = np.min(self.data[:, i])\n self.data[:, i] -= mn\n mx = np.max(self.data[:, i])\n self.data[:, i] /= mx\n self.FeatureRange.append([mn, mx])\n\n def Check(self, pnt):\n for i in range(self.nFeatures):\n pnt[i] -= self.FeatureRange[i][0]\n pnt[i] /= self.FeatureRange[i][1]\n distances = []\n for i in range(len(self.data)):\n dist = np.linalg.norm(pnt - self.data[i])\n distances.append(dist)\n order = np.argsort(distances)\n c = Counter(self.TrainingData[:, self.nFeatures][order][0:7])\n ans = c.most_common(3)\n print(ans[0][0])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Knn(object):\n \"\"\"docstring for data\"\"\"\n\n def __init__(self, TrainingData):\n self.TrainingData = TrainingData\n self.nFeatures = self.TrainingData.shape[1] - 1\n self.data = TrainingData[:, 0:self.nFeatures].astype(float)\n self.FeatureRange = []\n self.normalize()\n\n def normalize(self, weights=None):\n if weights == None:\n weights = np.ones(self.nFeatures)\n for i in range(self.nFeatures):\n mn = np.min(self.data[:, i])\n self.data[:, i] -= mn\n mx = np.max(self.data[:, i])\n self.data[:, i] /= mx\n self.FeatureRange.append([mn, mx])\n\n def Check(self, pnt):\n for i in range(self.nFeatures):\n pnt[i] -= self.FeatureRange[i][0]\n pnt[i] /= self.FeatureRange[i][1]\n distances = []\n for i in range(len(self.data)):\n dist = np.linalg.norm(pnt - self.data[i])\n distances.append(dist)\n order = np.argsort(distances)\n c = Counter(self.TrainingData[:, self.nFeatures][order][0:7])\n ans = c.most_common(3)\n print(ans[0][0])\n\n\n<mask token>\n",
"step-3": "<mask token>\nTrainingData = np.loadtxt('Data2', delimiter=',', skiprows=1, dtype=str)\n\n\nclass Knn(object):\n \"\"\"docstring for data\"\"\"\n\n def __init__(self, TrainingData):\n self.TrainingData = TrainingData\n self.nFeatures = self.TrainingData.shape[1] - 1\n self.data = TrainingData[:, 0:self.nFeatures].astype(float)\n self.FeatureRange = []\n self.normalize()\n\n def normalize(self, weights=None):\n if weights == None:\n weights = np.ones(self.nFeatures)\n for i in range(self.nFeatures):\n mn = np.min(self.data[:, i])\n self.data[:, i] -= mn\n mx = np.max(self.data[:, i])\n self.data[:, i] /= mx\n self.FeatureRange.append([mn, mx])\n\n def Check(self, pnt):\n for i in range(self.nFeatures):\n pnt[i] -= self.FeatureRange[i][0]\n pnt[i] /= self.FeatureRange[i][1]\n distances = []\n for i in range(len(self.data)):\n dist = np.linalg.norm(pnt - self.data[i])\n distances.append(dist)\n order = np.argsort(distances)\n c = Counter(self.TrainingData[:, self.nFeatures][order][0:7])\n ans = c.most_common(3)\n print(ans[0][0])\n\n\nboop = Knn(TrainingData)\npnt = np.array([7.0, 3.2, 4.7, 1.85])\nboop.Check(pnt)\n",
"step-4": "import numpy as np\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nTrainingData = np.loadtxt('Data2', delimiter=',', skiprows=1, dtype=str)\n\n\nclass Knn(object):\n \"\"\"docstring for data\"\"\"\n\n def __init__(self, TrainingData):\n self.TrainingData = TrainingData\n self.nFeatures = self.TrainingData.shape[1] - 1\n self.data = TrainingData[:, 0:self.nFeatures].astype(float)\n self.FeatureRange = []\n self.normalize()\n\n def normalize(self, weights=None):\n if weights == None:\n weights = np.ones(self.nFeatures)\n for i in range(self.nFeatures):\n mn = np.min(self.data[:, i])\n self.data[:, i] -= mn\n mx = np.max(self.data[:, i])\n self.data[:, i] /= mx\n self.FeatureRange.append([mn, mx])\n\n def Check(self, pnt):\n for i in range(self.nFeatures):\n pnt[i] -= self.FeatureRange[i][0]\n pnt[i] /= self.FeatureRange[i][1]\n distances = []\n for i in range(len(self.data)):\n dist = np.linalg.norm(pnt - self.data[i])\n distances.append(dist)\n order = np.argsort(distances)\n c = Counter(self.TrainingData[:, self.nFeatures][order][0:7])\n ans = c.most_common(3)\n print(ans[0][0])\n\n\nboop = Knn(TrainingData)\npnt = np.array([7.0, 3.2, 4.7, 1.85])\nboop.Check(pnt)\n",
"step-5": "import numpy as np\nfrom collections import Counter\nimport matplotlib.pyplot as plt\n\n\n # 1. sepal length in cm\n # 2. sepal width in cm\n # 3. petal length in cm\n # 4. petal width in cm\nTrainingData = np.loadtxt(\"Data2\",delimiter = ',',skiprows = 1,dtype = str)\n\n\nclass Knn(object):\n\t\"\"\"docstring for data\"\"\"\n\tdef __init__(self, TrainingData):\n\t\tself.TrainingData = TrainingData\n\n\n\t\tself.nFeatures = self.TrainingData.shape[1]-1\n\t\tself.data = TrainingData[:,0:self.nFeatures].astype(float)\n\t\tself.FeatureRange = []\n\n\t\tself.normalize()\n\tdef normalize(self,weights = None):\n\t\tif weights == None:\n\t\t\tweights = np.ones(self.nFeatures)\n\t\tfor i in range(self.nFeatures):\n\n\t\t\tmn = np.min(self.data[:,i])\n\t\t\tself.data[:,i] -= mn\n\t\t\tmx = np.max(self.data[:,i])\n\t\t\tself.data[:,i] /= mx\n\n\t\t\tself.FeatureRange.append([mn,mx])\n\tdef Check(self,pnt):\n\t\tfor i in range(self.nFeatures):\n\t\t\tpnt[i] -= self.FeatureRange[i][0]\n\t\t\tpnt[i] /= self.FeatureRange[i][1]\n\n\t\tdistances = []\n\t\tfor i in range(len(self.data)):\n\t\t\tdist = np.linalg.norm(pnt-self.data[i])\n\t\t\tdistances.append(dist)\n\t\torder = np.argsort(distances)\n\t\tc = Counter(self.TrainingData[:,self.nFeatures][order][0:7])\n\t\tans = c.most_common(3)\n\t\tprint(ans[0][0])\n\n\n\n\n\nboop = Knn(TrainingData)\n\npnt = np.array([7.0,3.2,4.7,1.85])\nboop.Check(pnt)\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
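
Knn.Check reduces to a few array operations; the toy rerun below walks the same steps (per-feature min-max normalisation, Euclidean distances, majority vote over the k nearest labels). The iris-like numbers and k=3 are made up; the record itself uses k=7 and a 'Data2' file.

import numpy as np
from collections import Counter

features = np.array([[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2]])
labels = np.array(['setosa', 'setosa', 'versicolor', 'versicolor'])

mn, mx = features.min(axis=0), features.max(axis=0)
norm = (features - mn) / (mx - mn)               # same scaling as normalize()

query = (np.array([6.8, 3.1]) - mn) / (mx - mn)  # query scaled like the data
order = np.argsort(np.linalg.norm(norm - query, axis=1))
print(Counter(labels[order][:3]).most_common(1)[0][0])  # -> versicolor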
# -*- coding: utf-8 -*-
class Library(object):
def __init__(self, backend):
self._backend = backend
@property
def cache(self):
return self._backend.cache
def cache_key(self, key):
return self._backend.cache_key(key)
def get_url(self, track):
raise NotImplementedError()
|
normal
|
{
"blob_id": "ccee0e3c47fd3809e0670be24aaa6fd0a9bad3bc",
"index": 888,
"step-1": "class Library(object):\n <mask token>\n <mask token>\n\n def cache_key(self, key):\n return self._backend.cache_key(key)\n <mask token>\n",
"step-2": "class Library(object):\n <mask token>\n <mask token>\n\n def cache_key(self, key):\n return self._backend.cache_key(key)\n\n def get_url(self, track):\n raise NotImplementedError()\n",
"step-3": "class Library(object):\n\n def __init__(self, backend):\n self._backend = backend\n <mask token>\n\n def cache_key(self, key):\n return self._backend.cache_key(key)\n\n def get_url(self, track):\n raise NotImplementedError()\n",
"step-4": "class Library(object):\n\n def __init__(self, backend):\n self._backend = backend\n\n @property\n def cache(self):\n return self._backend.cache\n\n def cache_key(self, key):\n return self._backend.cache_key(key)\n\n def get_url(self, track):\n raise NotImplementedError()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\nclass Library(object):\n\n def __init__(self, backend):\n self._backend = backend\n\n @property\n def cache(self):\n return self._backend.cache\n\n def cache_key(self, key):\n return self._backend.cache_key(key)\n\n def get_url(self, track):\n raise NotImplementedError()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
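
The Library base class above is a small template: the backend supplies cache and cache_key, the subclass supplies get_url. A runnable sketch with an invented backend, assuming the Library class above is in scope:

class DummyBackend:
    """Stand-in backend; the real one comes from the surrounding project."""
    cache = {}

    def cache_key(self, key):
        return 'demo:%s' % key


class FileLibrary(Library):
    def get_url(self, track):
        # the one method the base class leaves abstract
        return 'file:///music/%s.mp3' % track


lib = FileLibrary(DummyBackend())
print(lib.cache_key('track-1'))  # demo:track-1
print(lib.get_url('track-1'))    # file:///music/track-1.mp3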
data_dir = "../data"
output_dir = './'
valid_id = dict()
for category in ("beauty", "fashion", "mobile"):
with open("%s/%s_data_info_val_competition.csv" % (data_dir, category), "r") as infile:
next(infile)
for line in infile:
curr_id = line.strip().split(',')[0]
valid_id[curr_id] = True
# This is the new output submission file containing 977987 rows
with open("submission_977.csv", "w") as outfile:
outfile.write("id,tagging\n")
# Please change the file below to your current submission filename containing 1174802 rows
# with open("submission-in.csv", "r") as infile:
with open("%s/submission_2103.csv" % output_dir, "r") as infile:
next(infile)
for line in infile:
curr_id = line.strip().split('_')[0]
if curr_id in valid_id:
outfile.write(line.strip() + '\n')
|
normal
|
{
"blob_id": "82556291c456b9e43e4e589ea4a77d320430344b",
"index": 7478,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor category in ('beauty', 'fashion', 'mobile'):\n with open('%s/%s_data_info_val_competition.csv' % (data_dir, category), 'r'\n ) as infile:\n next(infile)\n for line in infile:\n curr_id = line.strip().split(',')[0]\n valid_id[curr_id] = True\nwith open('submission_977.csv', 'w') as outfile:\n outfile.write('id,tagging\\n')\n with open('%s/submission_2103.csv' % output_dir, 'r') as infile:\n next(infile)\n for line in infile:\n curr_id = line.strip().split('_')[0]\n if curr_id in valid_id:\n outfile.write(line.strip() + '\\n')\n",
"step-3": "data_dir = '../data'\noutput_dir = './'\nvalid_id = dict()\nfor category in ('beauty', 'fashion', 'mobile'):\n with open('%s/%s_data_info_val_competition.csv' % (data_dir, category), 'r'\n ) as infile:\n next(infile)\n for line in infile:\n curr_id = line.strip().split(',')[0]\n valid_id[curr_id] = True\nwith open('submission_977.csv', 'w') as outfile:\n outfile.write('id,tagging\\n')\n with open('%s/submission_2103.csv' % output_dir, 'r') as infile:\n next(infile)\n for line in infile:\n curr_id = line.strip().split('_')[0]\n if curr_id in valid_id:\n outfile.write(line.strip() + '\\n')\n",
"step-4": "data_dir = \"../data\"\noutput_dir = './'\nvalid_id = dict()\n\nfor category in (\"beauty\", \"fashion\", \"mobile\"):\n with open(\"%s/%s_data_info_val_competition.csv\" % (data_dir, category), \"r\") as infile:\n next(infile)\n for line in infile:\n curr_id = line.strip().split(',')[0]\n valid_id[curr_id] = True\n\n# This is the new output submission file containing 977987 rows\nwith open(\"submission_977.csv\", \"w\") as outfile:\n outfile.write(\"id,tagging\\n\")\n \n # Please change the file below to your current submission filename containing 1174802 rows\n # with open(\"submission-in.csv\", \"r\") as infile:\n with open(\"%s/submission_2103.csv\" % output_dir, \"r\") as infile:\n next(infile)\n for line in infile:\n curr_id = line.strip().split('_')[0]\n if curr_id in valid_id:\n outfile.write(line.strip() + '\\n')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
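
The same two-pass filter expressed with pandas, assuming the CSVs parse cleanly (the raw string splitting above is more tolerant of odd rows); the file names and the '<id>_<label>' id format follow the record.

import pandas as pd

valid = set()
for category in ('beauty', 'fashion', 'mobile'):
    info = pd.read_csv('../data/%s_data_info_val_competition.csv' % category)
    valid.update(info.iloc[:, 0].astype(str))  # first column holds the id

sub = pd.read_csv('./submission_2103.csv')
kept = sub[sub['id'].str.split('_').str[0].isin(valid)]
kept.to_csv('submission_977.csv', index=False)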
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_choice_times(behavior_filename, verbose=False):
"""Calculates the choice time for each trial in the logfile"""
state_num2names = MCwatch.behavior.db.get_state_num2names()
resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[
'RESPONSE_WINDOW']
lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)
parsed_df_by_trial = ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(
lines, verbose=verbose)
choice_times = ArduFSM.TrialSpeak.identify_state_change_times(
parsed_df_by_trial, state0=resp_win_num, show_warnings=False)
return choice_times
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_choice_times(behavior_filename, verbose=False):
"""Calculates the choice time for each trial in the logfile"""
state_num2names = MCwatch.behavior.db.get_state_num2names()
resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[
'RESPONSE_WINDOW']
lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)
parsed_df_by_trial = ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(
lines, verbose=verbose)
choice_times = ArduFSM.TrialSpeak.identify_state_change_times(
parsed_df_by_trial, state0=resp_win_num, show_warnings=False)
return choice_times
def get_included_trials(trial_times, data_range, t_start=0, t_stop=0):
"""Identify the trials included in a temporal range.
trial_times : Series of trial times (e.g., rwin times) indexed by
trial labels
data_range : 2-tuple (start, stop) specifying interval to include
t_start, t_stop : amount of time before (after) each trial time that
must be within data_range in order for that trial to be included.
Returns: trial_labels that are included
Ex:
## Get the trial matrix
tm = MCwatch.behavior.db.get_trial_matrix(vs.bsession_name, True)
# Include all random trials
tm = my.pick_rows(tm, isrnd=True, outcome=['hit', 'error'])
# Identify range of trials to include
video_range_bbase = extras.get_video_range_bbase(vs)
included_trials = extras.get_included_trials(tm['rwin_time'],
data_range=video_range_bbase, t_start=-2, t_stop=0)
tm = tm.loc[included_trials]
"""
return trial_times[(trial_times + t_start >= data_range[0]) & (
trial_times + t_stop < data_range[1])].index
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import MCwatch
import ArduFSM
import numpy as np
def get_choice_times(behavior_filename, verbose=False):
"""Calculates the choice time for each trial in the logfile"""
state_num2names = MCwatch.behavior.db.get_state_num2names()
resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[
'RESPONSE_WINDOW']
lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)
parsed_df_by_trial = ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(
lines, verbose=verbose)
choice_times = ArduFSM.TrialSpeak.identify_state_change_times(
parsed_df_by_trial, state0=resp_win_num, show_warnings=False)
return choice_times
def get_included_trials(trial_times, data_range, t_start=0, t_stop=0):
"""Identify the trials included in a temporal range.
trial_times : Series of trial times (e.g., rwin times) indexed by
trial labels
data_range : 2-tuple (start, stop) specifying interval to include
t_start, t_stop : amount of time before (after) each trial time that
must be within data_range in order for that trial to be included.
Returns: trial_labels that are included
Ex:
## Get the trial matrix
tm = MCwatch.behavior.db.get_trial_matrix(vs.bsession_name, True)
# Include all random trials
tm = my.pick_rows(tm, isrnd=True, outcome=['hit', 'error'])
# Identify range of trials to include
video_range_bbase = extras.get_video_range_bbase(vs)
included_trials = extras.get_included_trials(tm['rwin_time'],
data_range=video_range_bbase, t_start=-2, t_stop=0)
tm = tm.loc[included_trials]
"""
return trial_times[(trial_times + t_start >= data_range[0]) & (
trial_times + t_stop < data_range[1])].index
<|reserved_special_token_1|>
""""Module for miscellaneous behavior stuff
For example, stuff like extracting lick times or choice times.
TrialSpeak shouldn't depend on stuff like that.
# Also get the pldf and use that to get lick times
ldf = ArduFSM.TrialSpeak.read_logfile_into_df(bdf.loc[idx, 'filename'])
# Get the lick times
lick_times = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(ldf, 'TCH')
# Group them by trial number and lick type and extract times
tt2licks = lick_times.groupby(['trial', 'arg0']).groups
for (trial, lick_type) in tt2licks:
tt2licks[(trial, lick_type)] = \
ldf.loc[tt2licks[(trial, lick_type)], 'time'].values / 1000.
# Get response window time as first transition into response window
state_change_df = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(
ldf, 'ST_CHG2')
rwin_open_times = my.pick_rows(state_change_df,
arg1=state_name2num['RESPONSE_WINDOW'])
rwin_open_times_by_trial = rwin_open_times.groupby(
'trial').first()['time'] / 1000.
# Get choice time as first transition out of response window
state_change_df = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(
ldf, 'ST_CHG2')
rwin_close_times = my.pick_rows(state_change_df,
arg0=state_name2num['RESPONSE_WINDOW'])
rwin_close_times_by_trial = rwin_close_times.groupby(
'trial').first()['time'] / 1000.
"""
import MCwatch
import ArduFSM
import numpy as np
def get_choice_times(behavior_filename, verbose=False):
"""Calculates the choice time for each trial in the logfile"""
# Find the state number for response window
state_num2names = MCwatch.behavior.db.get_state_num2names()
resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[
'RESPONSE_WINDOW']
# Get the lines
lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)
parsed_df_by_trial = \
ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(lines,
verbose=verbose)
# Identify times of state change out of response window
# No sense in warning because there's also multiple state changes on
# rewarded trials
choice_times = ArduFSM.TrialSpeak.identify_state_change_times(
parsed_df_by_trial, state0=resp_win_num, show_warnings=False)
return choice_times
def get_included_trials(trial_times, data_range, t_start=0, t_stop=0):
"""Identify the trials included in a temporal range.
trial_times : Series of trial times (e.g., rwin times) indexed by
trial labels
data_range : 2-tuple (start, stop) specifying interval to include
t_start, t_stop : amount of time before (after) each trial time that
must be within data_range in order for that trial to be included.
Returns: trial_labels that are included
Ex:
## Get the trial matrix
tm = MCwatch.behavior.db.get_trial_matrix(vs.bsession_name, True)
# Include all random trials
tm = my.pick_rows(tm, isrnd=True, outcome=['hit', 'error'])
# Identify range of trials to include
video_range_bbase = extras.get_video_range_bbase(vs)
included_trials = extras.get_included_trials(tm['rwin_time'],
data_range=video_range_bbase, t_start=-2, t_stop=0)
tm = tm.loc[included_trials]
"""
return trial_times[
(trial_times + t_start >= data_range[0]) &
(trial_times + t_stop < data_range[1])
].index
|
flexible
|
{
"blob_id": "78761eda403ad8f54187e5858a23c23d3dd79b09",
"index": 8821,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_choice_times(behavior_filename, verbose=False):\n \"\"\"Calculates the choice time for each trial in the logfile\"\"\"\n state_num2names = MCwatch.behavior.db.get_state_num2names()\n resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[\n 'RESPONSE_WINDOW']\n lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)\n parsed_df_by_trial = ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(\n lines, verbose=verbose)\n choice_times = ArduFSM.TrialSpeak.identify_state_change_times(\n parsed_df_by_trial, state0=resp_win_num, show_warnings=False)\n return choice_times\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_choice_times(behavior_filename, verbose=False):\n \"\"\"Calculates the choice time for each trial in the logfile\"\"\"\n state_num2names = MCwatch.behavior.db.get_state_num2names()\n resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[\n 'RESPONSE_WINDOW']\n lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)\n parsed_df_by_trial = ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(\n lines, verbose=verbose)\n choice_times = ArduFSM.TrialSpeak.identify_state_change_times(\n parsed_df_by_trial, state0=resp_win_num, show_warnings=False)\n return choice_times\n\n\ndef get_included_trials(trial_times, data_range, t_start=0, t_stop=0):\n \"\"\"Identify the trials included in a temporal range.\n \n trial_times : Series of trial times (e.g., rwin times) indexed by\n trial labels\n \n data_range : 2-tuple (start, stop) specifying interval to include\n \n t_start, t_stop : amount of time before (after) each trial time that\n must be within data_range in order for that trial to be included.\n \n Returns: trial_labels that are included \n\n Ex:\n ## Get the trial matrix\n tm = MCwatch.behavior.db.get_trial_matrix(vs.bsession_name, True)\n\n # Include all random trials\n tm = my.pick_rows(tm, isrnd=True, outcome=['hit', 'error'])\n \n # Identify range of trials to include\n video_range_bbase = extras.get_video_range_bbase(vs)\n included_trials = extras.get_included_trials(tm['rwin_time'], \n data_range=video_range_bbase, t_start=-2, t_stop=0)\n tm = tm.loc[included_trials]\n \"\"\"\n return trial_times[(trial_times + t_start >= data_range[0]) & (\n trial_times + t_stop < data_range[1])].index\n",
"step-4": "<mask token>\nimport MCwatch\nimport ArduFSM\nimport numpy as np\n\n\ndef get_choice_times(behavior_filename, verbose=False):\n \"\"\"Calculates the choice time for each trial in the logfile\"\"\"\n state_num2names = MCwatch.behavior.db.get_state_num2names()\n resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[\n 'RESPONSE_WINDOW']\n lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)\n parsed_df_by_trial = ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(\n lines, verbose=verbose)\n choice_times = ArduFSM.TrialSpeak.identify_state_change_times(\n parsed_df_by_trial, state0=resp_win_num, show_warnings=False)\n return choice_times\n\n\ndef get_included_trials(trial_times, data_range, t_start=0, t_stop=0):\n \"\"\"Identify the trials included in a temporal range.\n \n trial_times : Series of trial times (e.g., rwin times) indexed by\n trial labels\n \n data_range : 2-tuple (start, stop) specifying interval to include\n \n t_start, t_stop : amount of time before (after) each trial time that\n must be within data_range in order for that trial to be included.\n \n Returns: trial_labels that are included \n\n Ex:\n ## Get the trial matrix\n tm = MCwatch.behavior.db.get_trial_matrix(vs.bsession_name, True)\n\n # Include all random trials\n tm = my.pick_rows(tm, isrnd=True, outcome=['hit', 'error'])\n \n # Identify range of trials to include\n video_range_bbase = extras.get_video_range_bbase(vs)\n included_trials = extras.get_included_trials(tm['rwin_time'], \n data_range=video_range_bbase, t_start=-2, t_stop=0)\n tm = tm.loc[included_trials]\n \"\"\"\n return trial_times[(trial_times + t_start >= data_range[0]) & (\n trial_times + t_stop < data_range[1])].index\n",
"step-5": "\"\"\"\"Module for miscellaneous behavior stuff\n\nFor example, stuff like extracting lick times or choice times.\nTrialSpeak shouldn't depend on stuff like that.\n\n\n # Also get the pldf and use that to get lick times\n ldf = ArduFSM.TrialSpeak.read_logfile_into_df(bdf.loc[idx, 'filename']) \n \n # Get the lick times\n lick_times = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(ldf, 'TCH')\n \n # Group them by trial number and lick type and extract times\n tt2licks = lick_times.groupby(['trial', 'arg0']).groups\n for (trial, lick_type) in tt2licks:\n tt2licks[(trial, lick_type)] = \\\n ldf.loc[tt2licks[(trial, lick_type)], 'time'].values / 1000.\n \n # Get response window time as first transition into response window\n state_change_df = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(\n ldf, 'ST_CHG2')\n rwin_open_times = my.pick_rows(state_change_df, \n arg1=state_name2num['RESPONSE_WINDOW'])\n rwin_open_times_by_trial = rwin_open_times.groupby(\n 'trial').first()['time'] / 1000.\n \n # Get choice time as first transition out of response window\n state_change_df = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(\n ldf, 'ST_CHG2')\n rwin_close_times = my.pick_rows(state_change_df, \n arg0=state_name2num['RESPONSE_WINDOW'])\n rwin_close_times_by_trial = rwin_close_times.groupby(\n 'trial').first()['time'] / 1000.\n\"\"\"\nimport MCwatch\nimport ArduFSM\nimport numpy as np\n\ndef get_choice_times(behavior_filename, verbose=False):\n \"\"\"Calculates the choice time for each trial in the logfile\"\"\"\n # Find the state number for response window\n state_num2names = MCwatch.behavior.db.get_state_num2names() \n resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[\n 'RESPONSE_WINDOW']\n \n # Get the lines\n lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)\n parsed_df_by_trial = \\\n ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(lines, \n verbose=verbose)\n \n # Identify times of state change out of response window\n # No sense in warning because there's also multiple state changes on\n # rewarded trials\n choice_times = ArduFSM.TrialSpeak.identify_state_change_times(\n parsed_df_by_trial, state0=resp_win_num, show_warnings=False)\n \n return choice_times \n\ndef get_included_trials(trial_times, data_range, t_start=0, t_stop=0):\n \"\"\"Identify the trials included in a temporal range.\n \n trial_times : Series of trial times (e.g., rwin times) indexed by\n trial labels\n \n data_range : 2-tuple (start, stop) specifying interval to include\n \n t_start, t_stop : amount of time before (after) each trial time that\n must be within data_range in order for that trial to be included.\n \n Returns: trial_labels that are included \n\n Ex:\n ## Get the trial matrix\n tm = MCwatch.behavior.db.get_trial_matrix(vs.bsession_name, True)\n\n # Include all random trials\n tm = my.pick_rows(tm, isrnd=True, outcome=['hit', 'error'])\n \n # Identify range of trials to include\n video_range_bbase = extras.get_video_range_bbase(vs)\n included_trials = extras.get_included_trials(tm['rwin_time'], \n data_range=video_range_bbase, t_start=-2, t_stop=0)\n tm = tm.loc[included_trials]\n \"\"\"\n return trial_times[\n (trial_times + t_start >= data_range[0]) &\n (trial_times + t_stop < data_range[1])\n ].index\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
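
Because get_included_trials is pure pandas, its boundary behaviour is easy to pin down with a toy Series (assuming the function above is in scope). With rwin times at 5, 50 and 95 s and a 2 s pre-trial requirement, only trials whose whole [t-2, t] window sits inside (0, 60) survive.

import pandas as pd

trial_times = pd.Series([5.0, 50.0, 95.0], index=[0, 1, 2])
kept = get_included_trials(trial_times, data_range=(0.0, 60.0),
    t_start=-2, t_stop=0)
print(list(kept))  # [0, 1]; trial 2 falls past the end of the range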
from .routes import generate_routes
|
normal
|
{
"blob_id": "06339e9cd506f147d03c54aee82473e233b4ec2e",
"index": 8853,
"step-1": "<mask token>\n",
"step-2": "from .routes import generate_routes\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from django.shortcuts import render
from django.http import Http404
from thermometer.models import Therm
def index(request):
therms = Therm.objects.all()
return render(request, 'thermometer/index.html', {
'therms': therms,
})
def fetchsquare(request, id):
try:
therm = Therm.objects.get(id=id)
except Therm.DoesNotExist:
raise Http404('This item does not exist')
return render(request, 'thermometer/fetchsquare.html', {
'therm': therm,
})
|
normal
|
{
"blob_id": "504d4afc4b3e708d43110a2d85676fb745f1aba8",
"index": 9874,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fetchsquare(request, id):\n try:\n therm = Therm.objects.get(id=id)\n except Therm.DoesNotExist:\n raise Http404('This item does not exist')\n return render(request, 'thermometer/fetchsquare.html', {'therm': therm})\n",
"step-3": "<mask token>\n\n\ndef index(request):\n therms = Therm.objects.all()\n return render(request, 'thermometer/index.html', {'therms': therms})\n\n\ndef fetchsquare(request, id):\n try:\n therm = Therm.objects.get(id=id)\n except Therm.DoesNotExist:\n raise Http404('This item does not exist')\n return render(request, 'thermometer/fetchsquare.html', {'therm': therm})\n",
"step-4": "from django.shortcuts import render\nfrom django.http import Http404\nfrom thermometer.models import Therm\n\n\ndef index(request):\n therms = Therm.objects.all()\n return render(request, 'thermometer/index.html', {'therms': therms})\n\n\ndef fetchsquare(request, id):\n try:\n therm = Therm.objects.get(id=id)\n except Therm.DoesNotExist:\n raise Http404('This item does not exist')\n return render(request, 'thermometer/fetchsquare.html', {'therm': therm})\n",
"step-5": "from django.shortcuts import render\nfrom django.http import Http404\n\nfrom thermometer.models import Therm\n\ndef index(request):\n\ttherms = Therm.objects.all()\n\treturn render(request, 'thermometer/index.html', {\n\t\t'therms': therms,\n\t})\n\ndef fetchsquare(request, id):\n\ttry:\n\t\ttherm = Therm.objects.get(id=id)\n\texcept Therm.DoesNotExist:\n\t\traise Http404('This item does not exist')\n\treturn render(request, 'thermometer/fetchsquare.html', {\n\t\t'therm': therm,\n\t})",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
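
Django ships a shortcut for the try/except Http404 pattern in fetchsquare; an equivalent view using it is sketched below, with the same Therm model (the custom 'This item does not exist' message is lost, which is the trade-off).

from django.shortcuts import get_object_or_404, render
from thermometer.models import Therm


def fetchsquare(request, id):
    # raises Http404 automatically when no Therm matches
    therm = get_object_or_404(Therm, id=id)
    return render(request, 'thermometer/fetchsquare.html', {'therm': therm})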
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
generations = 100
for generation in range(generations):
population = np.random.randint(0, 255, size=(200, 39), dtype=np.uint8)
print('Population\n', population, end='\n\n')
cov19 = FuzzyNet()
print('Base Z matrix\n', cov19.Z, end='\n\n')
population_fa, population_matrix_z, best_FA_index = cov19.get_FA(
population=population)
if population_fa[best_FA_index] < 40:
print('Best Z matrix\n', population_matrix_z[best_FA_index],
population_fa[best_FA_index])
cov19.plot(population_matrix_z[best_FA_index])
time.sleep(5)
<|reserved_special_token_1|>
from covid import FuzzyNet
import numpy as np
import time
if __name__ == '__main__':
generations = 100
for generation in range(generations):
population = np.random.randint(0, 255, size=(200, 39), dtype=np.uint8)
print('Population\n', population, end='\n\n')
cov19 = FuzzyNet()
print('Base Z matrix\n', cov19.Z, end='\n\n')
population_fa, population_matrix_z, best_FA_index = cov19.get_FA(
population=population)
if population_fa[best_FA_index] < 40:
print('Best Z matrix\n', population_matrix_z[best_FA_index],
population_fa[best_FA_index])
cov19.plot(population_matrix_z[best_FA_index])
time.sleep(5)
<|reserved_special_token_1|>
from covid import FuzzyNet
import numpy as np
import time
if __name__ == '__main__':
# mx1,mx2,mx3,my1,my2,my3, dx1,dx2,dx3,dy1,dy2,dy3, p1,p2,p3,p4,p5,p6,p7,p8,p9, q1,q2,q3,q4,q5,q6,q7,q8,q9, r1,r2,r3,r4,r5,r6,r7,r8,r9
generations = 100
for generation in range(generations):
population = np.random.randint(0, 255, size=(200, 39), dtype=np.uint8)
print('Population\n', population, end='\n\n')
cov19 = FuzzyNet()
print('Base Z matrix\n', cov19.Z, end='\n\n')
population_fa, population_matrix_z, best_FA_index = cov19.get_FA(population=population)
if population_fa[best_FA_index] < 40:
print('Best Z matrix\n', population_matrix_z[best_FA_index], population_fa[best_FA_index])
cov19.plot(population_matrix_z[best_FA_index])
time.sleep(5)
# xi , yj = zij
|
flexible
|
{
"blob_id": "99f50d393e750bd8fa5bee21d99f08d20b9f5fe9",
"index": 9102,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n generations = 100\n for generation in range(generations):\n population = np.random.randint(0, 255, size=(200, 39), dtype=np.uint8)\n print('Population\\n', population, end='\\n\\n')\n cov19 = FuzzyNet()\n print('Base Z matrix\\n', cov19.Z, end='\\n\\n')\n population_fa, population_matrix_z, best_FA_index = cov19.get_FA(\n population=population)\n if population_fa[best_FA_index] < 40:\n print('Best Z matrix\\n', population_matrix_z[best_FA_index],\n population_fa[best_FA_index])\n cov19.plot(population_matrix_z[best_FA_index])\n time.sleep(5)\n",
"step-3": "from covid import FuzzyNet\nimport numpy as np\nimport time\nif __name__ == '__main__':\n generations = 100\n for generation in range(generations):\n population = np.random.randint(0, 255, size=(200, 39), dtype=np.uint8)\n print('Population\\n', population, end='\\n\\n')\n cov19 = FuzzyNet()\n print('Base Z matrix\\n', cov19.Z, end='\\n\\n')\n population_fa, population_matrix_z, best_FA_index = cov19.get_FA(\n population=population)\n if population_fa[best_FA_index] < 40:\n print('Best Z matrix\\n', population_matrix_z[best_FA_index],\n population_fa[best_FA_index])\n cov19.plot(population_matrix_z[best_FA_index])\n time.sleep(5)\n",
"step-4": "from covid import FuzzyNet\nimport numpy as np\nimport time\n\n\nif __name__ == '__main__':\n # mx1,mx2,mx3,my1,my2,my3, dx1,dx2,dx3,dy1,dy2,dy3, p1,p2,p3,p4,p5,p6,p7,p8,p9, q1,q2,q3,q4,q5,q6,q7,q8,q9, r1,r2,r3,r4,r5,r6,r7,r8,r9\n generations = 100\n\n for generation in range(generations):\n population = np.random.randint(0, 255, size=(200, 39), dtype=np.uint8)\n print('Population\\n', population, end='\\n\\n')\n\n cov19 = FuzzyNet()\n print('Base Z matrix\\n', cov19.Z, end='\\n\\n')\n population_fa, population_matrix_z, best_FA_index = cov19.get_FA(population=population)\n\n if population_fa[best_FA_index] < 40:\n print('Best Z matrix\\n', population_matrix_z[best_FA_index], population_fa[best_FA_index])\n cov19.plot(population_matrix_z[best_FA_index])\n time.sleep(5)\n\n# xi , yj = zij",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
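
The loop above draws a fresh random population every generation, so nothing is inherited between generations. A minimal selection-plus-mutation step for the same uint8 genome layout is sketched below; the summed-gene fitness is a stand-in for get_FA, and lower-is-better matches the record's FA < 40 check.

import numpy as np

rng = np.random.default_rng(0)
pop = rng.integers(0, 256, size=(200, 39), dtype=np.uint8)
fitness = pop.astype(float).sum(axis=1)      # stand-in for cov19.get_FA

elite = pop[np.argsort(fitness)[:20]]        # keep the 20 fittest genomes
children = elite[rng.integers(0, 20, size=180)].copy()
flip = rng.random(children.shape) < 0.01     # 1% per-gene mutation rate
children[flip] = rng.integers(0, 256, size=int(flip.sum()), dtype=np.uint8)
pop = np.vstack([elite, children])           # next generation, still (200, 39)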
<|reserved_special_token_0|>
class City(BaseModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class City(BaseModel):
<|reserved_special_token_0|>
state_id = ''
name = ''
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class City(BaseModel):
"""City Class
Public class attributes:
state_d: type string
name: type string
"""
state_id = ''
name = ''
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from models.base_model import BaseModel
class City(BaseModel):
"""City Class
Public class attributes:
state_d: type string
name: type string
"""
state_id = ''
name = ''
<|reserved_special_token_1|>
#!/usr/bin/python3
"""City Module"""
from models.base_model import BaseModel
class City(BaseModel):
"""City Class
Public class attributes:
state_d: type string
name: type string
"""
state_id = ""
name = ""
|
flexible
|
{
"blob_id": "3f2c1a83ae0dfdba202038a209b90162ccddee36",
"index": 6115,
"step-1": "<mask token>\n\n\nclass City(BaseModel):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass City(BaseModel):\n <mask token>\n state_id = ''\n name = ''\n",
"step-3": "<mask token>\n\n\nclass City(BaseModel):\n \"\"\"City Class\n Public class attributes:\n state_d: type string\n name: type string\n \"\"\"\n state_id = ''\n name = ''\n",
"step-4": "<mask token>\nfrom models.base_model import BaseModel\n\n\nclass City(BaseModel):\n \"\"\"City Class\n Public class attributes:\n state_d: type string\n name: type string\n \"\"\"\n state_id = ''\n name = ''\n",
"step-5": "#!/usr/bin/python3\n\"\"\"City Module\"\"\"\nfrom models.base_model import BaseModel\n\n\nclass City(BaseModel):\n \"\"\"City Class\n Public class attributes:\n state_d: type string\n name: type string\n \"\"\"\n state_id = \"\"\n name = \"\"\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
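
A quick instantiation sketch for the City class; it assumes the usual BaseModel behaviour in this kind of project (auto-generated id and timestamps, a to_dict() serializer), which the record does not show.

city = City()
city.state_id = 'CA-1'   # hypothetical state id
city.name = 'San Francisco'
print(city.to_dict())    # assumed BaseModel helper; includes __class__, id, timestamps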
from django.contrib.auth.models import User
from django_filters import (
NumberFilter,
DateTimeFilter,
AllValuesFilter
)
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import permissions
from rest_framework.throttling import ScopedRateThrottle
from rest_framework import filters
from rest_framework.generics import (
ListCreateAPIView,
RetrieveUpdateDestroyAPIView,
GenericAPIView,
ListAPIView,
RetrieveAPIView
)
from games.models import (
GameCategory,
Game,
Player,
PlayerScore
)
from games.serializers import (
GameCategorySerializer,
GameSerializer,
PlayerSerializer,
PlayerScoreSerializer,
)
from games.serializers import UserSerializer
from games.permissions import IsOwnerOrReadOnly
class ApiRoot(GenericAPIView):
name= 'api-root'
def get(self,request,*args,**kwargs):
return Response(
{
'players':reverse(PlayerList.name,request=request),
'game-categories':reverse(GameCategoryList.name,request=request),
'game':reverse(GameList.name,request=request),
'scores':reverse(PlayerScoreList.name,request=request),
'users': reverse(UserList.name,request=request)
}
)
class GameCategoryList(ListCreateAPIView):
queryset = GameCategory.objects.all()
serializer_class = GameCategorySerializer
name = 'gamecategory-list'
throttle_scope = 'game-categories'
throttle_classes = (ScopedRateThrottle,)
filter_fields = ('name',)
search_fields = ('^name',)
ordering_fields = ('name',)
class GameCategoryDetail(RetrieveUpdateDestroyAPIView):
queryset = GameCategory.objects.all()
serializer_class = GameCategorySerializer
name = 'gamecategory-detail'
throttle_scope = 'game-categories'
throttle_classes = (ScopedRateThrottle,)
class GameList(ListCreateAPIView):
queryset = Game.objects.all()
serializer_class = GameSerializer
name = 'game-list'
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly
)
filter_fields = (
'name',
'game_category',
'release_date',
'played',
'owner',
)
search_fields = (
'^name',
)
ordering_fields = (
'name',
'release_date',
)
def perform_create(self, serializer):
# pass an additional owner field to the create method
# to set the owner to the user recieved in the request
serializer.save(owner=self.request.user)
class GameDetail(RetrieveUpdateDestroyAPIView):
queryset = Game.objects.all()
serializer_class = GameSerializer
name = 'game-detail'
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly
)
class PlayerList(ListCreateAPIView):
queryset = Player.objects.all()
serializer_class = PlayerSerializer
name = 'player-list'
filter_fields = (
'name',
'gender',
)
search_fields = (
'^name',
)
ordering_fields = (
'name',
)
class PlayerDetail(RetrieveUpdateDestroyAPIView):
queryset = Player.objects.all()
serializer_class = PlayerSerializer
name = 'player-detail'
class PlayerScoreFilter(filters.FilterSet):
min_score = NumberFilter(
name='score',lookup_expr='gte'
)
max_score = NumberFilter(
name='score',lookup_expr='lte'
)
from_score_date = DateTimeFilter(
name='score_date',
lookup_expr='gte'
)
to_score_date = DateTimeFilter(
name='score_date',
lookup_expr='lte'
)
player_name = AllValuesFilter(
name='player__name'
)
game_name = AllValuesFilter(
name= 'game__name'
)
class Meta:
model = PlayerScore
fields = (
'score',
'from_score_date',
'to_score_date',
'min_score',
'max_score',
# player__name will be accessed as player_name
'player_name',
#game__name will be accessed as game_name
'game_name'
)
class PlayerScoreList(ListCreateAPIView):
queryset = PlayerScore.objects.all()
serializer_class = PlayerScoreSerializer
name = 'playerscore-list'
filter_class =PlayerScoreFilter
ordering_fields = (
'score',
'score_date',
)
class PlayerScoreDetail(RetrieveUpdateDestroyAPIView):
queryset = PlayerScore.objects.all()
serializer_class = PlayerScoreSerializer
name = 'playerscore-detail'
class UserList(ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
name = 'user-list'
class UserDetail(RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
name = 'user-detail'
|
normal
|
{
"blob_id": "2908d34165fac272c9571be623855a0613c952f3",
"index": 5433,
"step-1": "<mask token>\n\n\nclass GameList(ListCreateAPIView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def perform_create(self, serializer):\n serializer.save(owner=self.request.user)\n\n\nclass GameDetail(RetrieveUpdateDestroyAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-detail'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n\n\nclass PlayerList(ListCreateAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-list'\n filter_fields = 'name', 'gender'\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass PlayerDetail(RetrieveUpdateDestroyAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-detail'\n\n\nclass PlayerScoreFilter(filters.FilterSet):\n min_score = NumberFilter(name='score', lookup_expr='gte')\n max_score = NumberFilter(name='score', lookup_expr='lte')\n from_score_date = DateTimeFilter(name='score_date', lookup_expr='gte')\n to_score_date = DateTimeFilter(name='score_date', lookup_expr='lte')\n player_name = AllValuesFilter(name='player__name')\n game_name = AllValuesFilter(name='game__name')\n\n\n class Meta:\n model = PlayerScore\n fields = ('score', 'from_score_date', 'to_score_date', 'min_score',\n 'max_score', 'player_name', 'game_name')\n\n\nclass PlayerScoreList(ListCreateAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-list'\n filter_class = PlayerScoreFilter\n ordering_fields = 'score', 'score_date'\n\n\nclass PlayerScoreDetail(RetrieveUpdateDestroyAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-detail'\n\n\nclass UserList(ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-list'\n\n\nclass UserDetail(RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-detail'\n",
"step-2": "<mask token>\n\n\nclass ApiRoot(GenericAPIView):\n <mask token>\n\n def get(self, request, *args, **kwargs):\n return Response({'players': reverse(PlayerList.name, request=\n request), 'game-categories': reverse(GameCategoryList.name,\n request=request), 'game': reverse(GameList.name, request=\n request), 'scores': reverse(PlayerScoreList.name, request=\n request), 'users': reverse(UserList.name, request=request)})\n\n\nclass GameCategoryList(ListCreateAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-list'\n throttle_scope = 'game-categories'\n throttle_classes = ScopedRateThrottle,\n filter_fields = 'name',\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass GameCategoryDetail(RetrieveUpdateDestroyAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-detail'\n throttle_scope = 'game-categories'\n throttle_classes = ScopedRateThrottle,\n\n\nclass GameList(ListCreateAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-list'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n filter_fields = 'name', 'game_category', 'release_date', 'played', 'owner'\n search_fields = '^name',\n ordering_fields = 'name', 'release_date'\n\n def perform_create(self, serializer):\n serializer.save(owner=self.request.user)\n\n\nclass GameDetail(RetrieveUpdateDestroyAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-detail'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n\n\nclass PlayerList(ListCreateAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-list'\n filter_fields = 'name', 'gender'\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass PlayerDetail(RetrieveUpdateDestroyAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-detail'\n\n\nclass PlayerScoreFilter(filters.FilterSet):\n min_score = NumberFilter(name='score', lookup_expr='gte')\n max_score = NumberFilter(name='score', lookup_expr='lte')\n from_score_date = DateTimeFilter(name='score_date', lookup_expr='gte')\n to_score_date = DateTimeFilter(name='score_date', lookup_expr='lte')\n player_name = AllValuesFilter(name='player__name')\n game_name = AllValuesFilter(name='game__name')\n\n\n class Meta:\n model = PlayerScore\n fields = ('score', 'from_score_date', 'to_score_date', 'min_score',\n 'max_score', 'player_name', 'game_name')\n\n\nclass PlayerScoreList(ListCreateAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-list'\n filter_class = PlayerScoreFilter\n ordering_fields = 'score', 'score_date'\n\n\nclass PlayerScoreDetail(RetrieveUpdateDestroyAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-detail'\n\n\nclass UserList(ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-list'\n\n\nclass UserDetail(RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-detail'\n",
"step-3": "<mask token>\n\n\nclass ApiRoot(GenericAPIView):\n name = 'api-root'\n\n def get(self, request, *args, **kwargs):\n return Response({'players': reverse(PlayerList.name, request=\n request), 'game-categories': reverse(GameCategoryList.name,\n request=request), 'game': reverse(GameList.name, request=\n request), 'scores': reverse(PlayerScoreList.name, request=\n request), 'users': reverse(UserList.name, request=request)})\n\n\nclass GameCategoryList(ListCreateAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-list'\n throttle_scope = 'game-categories'\n throttle_classes = ScopedRateThrottle,\n filter_fields = 'name',\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass GameCategoryDetail(RetrieveUpdateDestroyAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-detail'\n throttle_scope = 'game-categories'\n throttle_classes = ScopedRateThrottle,\n\n\nclass GameList(ListCreateAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-list'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n filter_fields = 'name', 'game_category', 'release_date', 'played', 'owner'\n search_fields = '^name',\n ordering_fields = 'name', 'release_date'\n\n def perform_create(self, serializer):\n serializer.save(owner=self.request.user)\n\n\nclass GameDetail(RetrieveUpdateDestroyAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-detail'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n\n\nclass PlayerList(ListCreateAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-list'\n filter_fields = 'name', 'gender'\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass PlayerDetail(RetrieveUpdateDestroyAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-detail'\n\n\nclass PlayerScoreFilter(filters.FilterSet):\n min_score = NumberFilter(name='score', lookup_expr='gte')\n max_score = NumberFilter(name='score', lookup_expr='lte')\n from_score_date = DateTimeFilter(name='score_date', lookup_expr='gte')\n to_score_date = DateTimeFilter(name='score_date', lookup_expr='lte')\n player_name = AllValuesFilter(name='player__name')\n game_name = AllValuesFilter(name='game__name')\n\n\n class Meta:\n model = PlayerScore\n fields = ('score', 'from_score_date', 'to_score_date', 'min_score',\n 'max_score', 'player_name', 'game_name')\n\n\nclass PlayerScoreList(ListCreateAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-list'\n filter_class = PlayerScoreFilter\n ordering_fields = 'score', 'score_date'\n\n\nclass PlayerScoreDetail(RetrieveUpdateDestroyAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-detail'\n\n\nclass UserList(ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-list'\n\n\nclass UserDetail(RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-detail'\n",
"step-4": "from django.contrib.auth.models import User\nfrom django_filters import NumberFilter, DateTimeFilter, AllValuesFilter\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework import permissions\nfrom rest_framework.throttling import ScopedRateThrottle\nfrom rest_framework import filters\nfrom rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, GenericAPIView, ListAPIView, RetrieveAPIView\nfrom games.models import GameCategory, Game, Player, PlayerScore\nfrom games.serializers import GameCategorySerializer, GameSerializer, PlayerSerializer, PlayerScoreSerializer\nfrom games.serializers import UserSerializer\nfrom games.permissions import IsOwnerOrReadOnly\n\n\nclass ApiRoot(GenericAPIView):\n name = 'api-root'\n\n def get(self, request, *args, **kwargs):\n return Response({'players': reverse(PlayerList.name, request=\n request), 'game-categories': reverse(GameCategoryList.name,\n request=request), 'game': reverse(GameList.name, request=\n request), 'scores': reverse(PlayerScoreList.name, request=\n request), 'users': reverse(UserList.name, request=request)})\n\n\nclass GameCategoryList(ListCreateAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-list'\n throttle_scope = 'game-categories'\n throttle_classes = ScopedRateThrottle,\n filter_fields = 'name',\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass GameCategoryDetail(RetrieveUpdateDestroyAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-detail'\n throttle_scope = 'game-categories'\n throttle_classes = ScopedRateThrottle,\n\n\nclass GameList(ListCreateAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-list'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n filter_fields = 'name', 'game_category', 'release_date', 'played', 'owner'\n search_fields = '^name',\n ordering_fields = 'name', 'release_date'\n\n def perform_create(self, serializer):\n serializer.save(owner=self.request.user)\n\n\nclass GameDetail(RetrieveUpdateDestroyAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-detail'\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly)\n\n\nclass PlayerList(ListCreateAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-list'\n filter_fields = 'name', 'gender'\n search_fields = '^name',\n ordering_fields = 'name',\n\n\nclass PlayerDetail(RetrieveUpdateDestroyAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-detail'\n\n\nclass PlayerScoreFilter(filters.FilterSet):\n min_score = NumberFilter(name='score', lookup_expr='gte')\n max_score = NumberFilter(name='score', lookup_expr='lte')\n from_score_date = DateTimeFilter(name='score_date', lookup_expr='gte')\n to_score_date = DateTimeFilter(name='score_date', lookup_expr='lte')\n player_name = AllValuesFilter(name='player__name')\n game_name = AllValuesFilter(name='game__name')\n\n\n class Meta:\n model = PlayerScore\n fields = ('score', 'from_score_date', 'to_score_date', 'min_score',\n 'max_score', 'player_name', 'game_name')\n\n\nclass PlayerScoreList(ListCreateAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-list'\n filter_class = PlayerScoreFilter\n 
ordering_fields = 'score', 'score_date'\n\n\nclass PlayerScoreDetail(RetrieveUpdateDestroyAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-detail'\n\n\nclass UserList(ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-list'\n\n\nclass UserDetail(RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-detail'\n",
"step-5": "from django.contrib.auth.models import User\nfrom django_filters import (\n NumberFilter,\n DateTimeFilter,\n AllValuesFilter\n)\n\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework import permissions\nfrom rest_framework.throttling import ScopedRateThrottle\nfrom rest_framework import filters\n\nfrom rest_framework.generics import (\n ListCreateAPIView,\n RetrieveUpdateDestroyAPIView,\n GenericAPIView,\n ListAPIView,\n RetrieveAPIView\n)\n\nfrom games.models import (\n GameCategory,\n Game,\n Player,\n PlayerScore\n)\n\nfrom games.serializers import (\n GameCategorySerializer,\n GameSerializer,\n PlayerSerializer,\n PlayerScoreSerializer,\n)\n\nfrom games.serializers import UserSerializer\nfrom games.permissions import IsOwnerOrReadOnly\n\n\n\n\nclass ApiRoot(GenericAPIView):\n name= 'api-root'\n\n def get(self,request,*args,**kwargs):\n return Response(\n {\n 'players':reverse(PlayerList.name,request=request),\n 'game-categories':reverse(GameCategoryList.name,request=request),\n 'game':reverse(GameList.name,request=request),\n 'scores':reverse(PlayerScoreList.name,request=request),\n 'users': reverse(UserList.name,request=request)\n }\n )\n\n\n\n\nclass GameCategoryList(ListCreateAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-list'\n throttle_scope = 'game-categories'\n throttle_classes = (ScopedRateThrottle,)\n filter_fields = ('name',)\n search_fields = ('^name',)\n ordering_fields = ('name',)\n\nclass GameCategoryDetail(RetrieveUpdateDestroyAPIView):\n queryset = GameCategory.objects.all()\n serializer_class = GameCategorySerializer\n name = 'gamecategory-detail'\n throttle_scope = 'game-categories'\n throttle_classes = (ScopedRateThrottle,)\n\nclass GameList(ListCreateAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-list'\n permission_classes = (\n permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly\n )\n filter_fields = (\n 'name',\n 'game_category',\n 'release_date',\n 'played',\n 'owner',\n )\n search_fields = (\n '^name',\n )\n ordering_fields = (\n 'name',\n 'release_date',\n )\n\n def perform_create(self, serializer):\n # pass an additional owner field to the create method\n # to set the owner to the user recieved in the request\n serializer.save(owner=self.request.user)\n\nclass GameDetail(RetrieveUpdateDestroyAPIView):\n queryset = Game.objects.all()\n serializer_class = GameSerializer\n name = 'game-detail'\n\n permission_classes = (\n permissions.IsAuthenticatedOrReadOnly,\n IsOwnerOrReadOnly\n )\n\nclass PlayerList(ListCreateAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-list'\n filter_fields = (\n 'name',\n 'gender',\n )\n search_fields = (\n '^name',\n )\n ordering_fields = (\n 'name',\n )\n\nclass PlayerDetail(RetrieveUpdateDestroyAPIView):\n queryset = Player.objects.all()\n serializer_class = PlayerSerializer\n name = 'player-detail'\n\n\nclass PlayerScoreFilter(filters.FilterSet):\n min_score = NumberFilter(\n name='score',lookup_expr='gte'\n )\n max_score = NumberFilter(\n name='score',lookup_expr='lte'\n )\n from_score_date = DateTimeFilter(\n name='score_date',\n lookup_expr='gte'\n )\n to_score_date = DateTimeFilter(\n name='score_date',\n lookup_expr='lte'\n )\n player_name = AllValuesFilter(\n name='player__name'\n )\n game_name = AllValuesFilter(\n name= 'game__name'\n )\n\n class Meta:\n model = PlayerScore\n fields = 
(\n 'score',\n 'from_score_date',\n 'to_score_date',\n 'min_score',\n 'max_score',\n # player__name will be accessed as player_name\n 'player_name',\n #game__name will be accessed as game_name\n 'game_name'\n )\n\n\n\nclass PlayerScoreList(ListCreateAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-list'\n filter_class =PlayerScoreFilter\n ordering_fields = (\n 'score',\n 'score_date',\n )\n\n\nclass PlayerScoreDetail(RetrieveUpdateDestroyAPIView):\n queryset = PlayerScore.objects.all()\n serializer_class = PlayerScoreSerializer\n name = 'playerscore-detail'\n\nclass UserList(ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-list'\n\nclass UserDetail(RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n name = 'user-detail'\n\n\n",
"step-ids": [
18,
25,
26,
27,
28
]
}
|
[
18,
25,
26,
27,
28
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
c.execute('SELECT * FROM example')
<|reserved_special_token_0|>
print('Content-Type:text/html; charset=utf-8')
print()
for i in records1.split('\n'):
print(i)
for i in records_dyn:
print(i)
for i in records1.split('\n'):
print(i)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
form = cgi.FieldStorage()
c.execute('SELECT * FROM example')
recs = c.fetchall()
records1 = """
<body>
<table>
<tbody>
<tr>
<th>Full Name</th>
<th>Average Score</th>
</tr>"""
records_dyn = [f'<tr><td>{name}</td><td>{avg}</td></tr>' for recs[1], recs[
2] in recs]
records2 = """
<form method="POST" action="index.py">
<input type="submit" value="Go Back">
</form>
</body>
</table>
</body>
</html>"""
print('Content-Type:text/html; charset=utf-8')
print()
for i in records1.split('\n'):
print(i)
for i in records_dyn:
print(i)
for i in records1.split('\n'):
print(i)
<|reserved_special_token_1|>
import cgitb
import cgi
import pymysql
form = cgi.FieldStorage()
c.execute('SELECT * FROM example')
recs = c.fetchall()
records1 = """
<body>
<table>
<tbody>
<tr>
<th>Full Name</th>
<th>Average Score</th>
</tr>"""
records_dyn = [f'<tr><td>{name}</td><td>{avg}</td></tr>' for recs[1], recs[
2] in recs]
records2 = """
<form method="POST" action="index.py">
<input type="submit" value="Go Back">
</form>
</body>
</table>
</body>
</html>"""
print('Content-Type:text/html; charset=utf-8')
print()
for i in records1.split('\n'):
print(i)
for i in records_dyn:
print(i)
for i in records1.split('\n'):
print(i)
<|reserved_special_token_1|>
#!/usr/bin/env python
import cgitb
import cgi
import pymysql
form = cgi.FieldStorage()
c.execute("SELECT * FROM example")
recs = c.fetchall()
records1 = """
<body>
<table>
<tbody>
<tr>
<th>Full Name</th>
<th>Average Score</th>
</tr>"""
records_dyn = [
f"<tr><td>{name}</td><td>{avg}</td></tr>" for recs[1], recs[2] in recs]
records2 = """
<form method="POST" action="index.py">
<input type="submit" value="Go Back">
</form>
</body>
</table>
</body>
</html>"""
print("Content-Type:text/html; charset=utf-8")
print()
for i in records1.split("\n"):
print(i)
for i in records_dyn:
print(i)
for i in records1.split("\n"):
print(i)
|
flexible
|
{
"blob_id": "b5fee01582a28085983c56b9c266ef7fd5c3c927",
"index": 5132,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nc.execute('SELECT * FROM example')\n<mask token>\nprint('Content-Type:text/html; charset=utf-8')\nprint()\nfor i in records1.split('\\n'):\n print(i)\nfor i in records_dyn:\n print(i)\nfor i in records1.split('\\n'):\n print(i)\n",
"step-3": "<mask token>\nform = cgi.FieldStorage()\nc.execute('SELECT * FROM example')\nrecs = c.fetchall()\nrecords1 = \"\"\"\n<body>\n\t<table>\n\t\t<tbody>\n\t\t\t<tr>\n\t\t\t\t<th>Full Name</th>\n\t\t\t\t<th>Average Score</th>\n\t\t\t</tr>\"\"\"\nrecords_dyn = [f'<tr><td>{name}</td><td>{avg}</td></tr>' for recs[1], recs[\n 2] in recs]\nrecords2 = \"\"\"\n<form method=\"POST\" action=\"index.py\">\n<input type=\"submit\" value=\"Go Back\">\n</form>\n\t\t</body>\n\t</table>\n</body>\n</html>\"\"\"\nprint('Content-Type:text/html; charset=utf-8')\nprint()\nfor i in records1.split('\\n'):\n print(i)\nfor i in records_dyn:\n print(i)\nfor i in records1.split('\\n'):\n print(i)\n",
"step-4": "import cgitb\nimport cgi\nimport pymysql\nform = cgi.FieldStorage()\nc.execute('SELECT * FROM example')\nrecs = c.fetchall()\nrecords1 = \"\"\"\n<body>\n\t<table>\n\t\t<tbody>\n\t\t\t<tr>\n\t\t\t\t<th>Full Name</th>\n\t\t\t\t<th>Average Score</th>\n\t\t\t</tr>\"\"\"\nrecords_dyn = [f'<tr><td>{name}</td><td>{avg}</td></tr>' for recs[1], recs[\n 2] in recs]\nrecords2 = \"\"\"\n<form method=\"POST\" action=\"index.py\">\n<input type=\"submit\" value=\"Go Back\">\n</form>\n\t\t</body>\n\t</table>\n</body>\n</html>\"\"\"\nprint('Content-Type:text/html; charset=utf-8')\nprint()\nfor i in records1.split('\\n'):\n print(i)\nfor i in records_dyn:\n print(i)\nfor i in records1.split('\\n'):\n print(i)\n",
"step-5": "#!/usr/bin/env python\r\nimport cgitb\r\nimport cgi\r\nimport pymysql\r\n\r\nform = cgi.FieldStorage()\r\nc.execute(\"SELECT * FROM example\")\r\nrecs = c.fetchall()\r\nrecords1 = \"\"\"\r\n<body>\r\n\t<table>\r\n\t\t<tbody>\r\n\t\t\t<tr>\r\n\t\t\t\t<th>Full Name</th>\r\n\t\t\t\t<th>Average Score</th>\r\n\t\t\t</tr>\"\"\"\r\nrecords_dyn = [\r\n f\"<tr><td>{name}</td><td>{avg}</td></tr>\" for recs[1], recs[2] in recs]\r\nrecords2 = \"\"\"\r\n<form method=\"POST\" action=\"index.py\">\r\n<input type=\"submit\" value=\"Go Back\">\r\n</form>\r\n\t\t</body>\r\n\t</table>\r\n</body>\r\n</html>\"\"\"\r\nprint(\"Content-Type:text/html; charset=utf-8\")\r\nprint()\r\nfor i in records1.split(\"\\n\"):\r\n print(i)\r\nfor i in records_dyn:\r\n print(i)\r\nfor i in records1.split(\"\\n\"):\r\n print(i)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from .exenv import *
|
normal
|
{
"blob_id": "9fea76b1612bd02f512072692090f8ef60e8a0fe",
"index": 1498,
"step-1": "<mask token>\n",
"step-2": "from .exenv import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |