index (int64, 0–100k) | blob_id (string, 40 chars) | code (string, 7–7.27M chars) | steps (list, 1–1.25k items) | error (bool, 2 classes)
---|---|---|---|---|
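The rows below follow this schema. A minimal sketch of iterating them with the Hugging Face datasets library; the repository path is hypothetical and stands in for wherever this dump actually lives:

from datasets import load_dataset

ds = load_dataset("user/python-code-steps", split="train", streaming=True)  # hypothetical path
for row in ds:
    assert len(row["blob_id"]) == 40  # fixed-width hex id, likely a git blob SHA-1
    print(row["index"], len(row["steps"]), row["error"])
    break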
99,500 |
16e0b88617a0ff0d15b3089e059665c38e6379da
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Ask for the dimensions of a rectangular room (in meters), then compute and
# display how much carpet is needed to cover the whole room.
length = float(raw_input("Enter the room length: "))
width = float(raw_input("Enter the room width: "))
area = length * width
print("Carpet needed: " + str(area) + " square meters.")
# Ask for the carpet price per square chi and display the following:
# carpet needed in square meters
# carpet needed in square chi (1 m = 3 chi, so 1 square meter = 9 square chi)
# total price of the carpet
print("Carpet needed: " + str(area * 9) + " square chi.")
priceOfCarpet = float(raw_input("Carpet price per square chi: "))
print("Total carpet price: " + str(priceOfCarpet * area * 9))
# Tally the customer's small change
countOfFiveCents = int(raw_input("How many 5-fen coins? "))
countOfTwoCents = int(raw_input("How many 2-fen coins? "))
countOfOneCents = int(raw_input("How many 1-fen coins? "))
total = 5 * countOfFiveCents + 2 * countOfTwoCents + countOfOneCents
print("Total value: " + str(total) + " fen")
|
[
"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# 编写程序询问长方形房间的尺寸(单位是米),然后计算覆盖整个房间总共需要多少地毯,并显示出来\nlength = float(raw_input(\"请输入房间的长度:\"))\nwidth = float(raw_input(\"请输入房间的宽度:\"))\narea = length * width\nprint(\"总共需要\" + str(area) + \"平方米地毯。\")\n\n# 询问每平方尺地毯的价格,并显示一下内容\n# 总共需要多少地毯,单位是平方米\n# 总共需要多少地毯,单位是平方尺\n# 地毯的总价格\n\nprint(\"总共需要\" + str(area * 9) + \"平方尺地毯。\")\npriceOfCarpet = float(raw_input(\"请问每平方尺地毯的价格:\"))\nprint(\"地毯的总价是:\" + str(priceOfCarpet * area * 9))\n\n# 统计客户的零钱\ncountOfFiveCents = int(raw_input(\"有多少个五分币?\"))\ncountOfTwoCents = int(raw_input(\"有多少个二分币?\"))\ncountOfOneCents = int(raw_input(\"有多少个一分币?\"))\ntotal = 5 * countOfFiveCents + 2 * countOfTwoCents + countOfOneCents\nprint(\"总面值:\" + str(total) + \"分\")\n",
"length = float(raw_input('请输入房间的长度:'))\nwidth = float(raw_input('请输入房间的宽度:'))\narea = length * width\nprint('总共需要' + str(area) + '平方米地毯。')\nprint('总共需要' + str(area * 9) + '平方尺地毯。')\npriceOfCarpet = float(raw_input('请问每平方尺地毯的价格:'))\nprint('地毯的总价是:' + str(priceOfCarpet * area * 9))\ncountOfFiveCents = int(raw_input('有多少个五分币?'))\ncountOfTwoCents = int(raw_input('有多少个二分币?'))\ncountOfOneCents = int(raw_input('有多少个一分币?'))\ntotal = 5 * countOfFiveCents + 2 * countOfTwoCents + countOfOneCents\nprint('总面值:' + str(total) + '分')\n",
"<assignment token>\nprint('总共需要' + str(area) + '平方米地毯。')\nprint('总共需要' + str(area * 9) + '平方尺地毯。')\n<assignment token>\nprint('地毯的总价是:' + str(priceOfCarpet * area * 9))\n<assignment token>\nprint('总面值:' + str(total) + '分')\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,501 |
96913d5b6acb0dfb76d2fc51e9225dd7601fe864
|
import torch
from models.model import Model
input = torch.randn(32, 1, 48, 600)
model = Model(num_classes=1000)
output = model(input)
print('output: ', output.size())
|
[
"import torch \nfrom models.model import Model\ninput = torch.randn(32, 1, 48, 600)\n\nmodel = Model(num_classes = 1000)\noutput = model(input)\n\nprint('output: ', output.size())\n",
"import torch\nfrom models.model import Model\ninput = torch.randn(32, 1, 48, 600)\nmodel = Model(num_classes=1000)\noutput = model(input)\nprint('output: ', output.size())\n",
"<import token>\ninput = torch.randn(32, 1, 48, 600)\nmodel = Model(num_classes=1000)\noutput = model(input)\nprint('output: ', output.size())\n",
"<import token>\n<assignment token>\nprint('output: ', output.size())\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
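Each `steps` list appears to rewrite the source through progressively coarser forms, ending with per-statement placeholder tokens in which consecutive statements of the same kind collapse into one token. A minimal sketch of that final abstraction, assuming the placeholders correspond to top-level AST node kinds (an inference from the rows shown here, not a documented pipeline):

import ast

def final_abstraction(source):
    # Map each top-level statement to a placeholder token, collapsing
    # consecutive duplicates, mirroring the last entry of a steps list.
    tokens = []
    for node in ast.parse(source).body:
        if isinstance(node, (ast.Import, ast.ImportFrom)):
            tok = '<import token>'
        elif isinstance(node, ast.Assign):
            tok = '<assignment token>'
        elif isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            tok = '<function token>'
        elif isinstance(node, ast.Expr) and isinstance(node.value, ast.Constant) and isinstance(node.value.value, str):
            tok = '<docstring token>'
        else:
            tok = '<code token>'
        if not tokens or tokens[-1] != tok:
            tokens.append(tok)
    return '\n'.join(tokens)

src = (
    "import torch\n"
    "from models.model import Model\n"
    "input = torch.randn(32, 1, 48, 600)\n"
    "model = Model(num_classes=1000)\n"
    "output = model(input)\n"
    "print('output: ', output.size())\n"
)
print(final_abstraction(src))
# <import token>
# <assignment token>
# <code token>

Run on the code cell above, this reproduces that row's final steps entry.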
99,502 |
5572d2a50b38f24acf737481534b16d47c5979c3
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 19 18:39:04 2018
Description: Functions to describe the basic info of a DataFrame.
The actual functions are the following:
- describe_features_one_by_one
- full_description
- names
@author: LauradaSilva
"""
import pandas as pd
import numpy as np

def describe_features_one_by_one(myDF):
    '''
    Description of each feature of a dataframe one by one.
    '''
    print("Dimension of this data frame", myDF.shape)
    print("----------------------------------------")
    var = "go"
    for feature in myDF:
        if var != "exit":
            print("----------------------------------------")
            print(myDF[feature].describe())
            print("----------------------------------------")
            var = input("Press any key to continue or type exit to finish \n")
        else:
            break

def full_description(myDF):
    '''
    Full description of a DataFrame.
    Including: basic statistics + number of missing values + data types
    '''
    dfDescription = myDF.describe(include="all").transpose()
    dfDescription["missingValues"] = myDF.isnull().sum()
    dfDescription["dataType"] = myDF.dtypes
    return dfDescription

def names(myDF):
    '''
    Get the names and indices of the features in the DataFrame
    '''
    index = list(range(0, len(myDF.columns), 1))
    name = list(myDF.columns.values)
    nameDF = np.column_stack([index, name])  # rows of (position, column name)
    return nameDF
|
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 19 18:39:04 2018\r\nDescription: Functions to describe the basic info of a DataFrame.\r\nThe actual functions are the following:\r\n - describe_features_one_by_one\r\n - full_description\r\n - names\r\n@author: LauradaSilva\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndef describe_features_one_by_one(myDF):\r\n '''\r\n Description of each feature of a dataframe one by one.\r\n '''\r\n \r\n print(\"Dimension of this data frame\", myDF.shape)\r\n print(\"----------------------------------------\")\r\n var = \"go\"\r\n for feature in myDF:\r\n if var != \"exit\":\r\n print(\"----------------------------------------\")\r\n print(myDF[feature].describe())\r\n print(\"----------------------------------------\")\r\n var = input(\"Press any button to continue or write exit to finish \\n\")\r\n else:\r\n break\r\n\r\n\r\ndef full_description(myDF):\r\n '''\r\n Full description of a DataFrame.\r\n Includying: basic statistics + number of missing values + data types\r\n '''\r\n \r\n dfDescription = myDF.describe(include = \"all\").transpose()\r\n dfDescription[\"missingValues\"] = myDF.isnull().sum()\r\n dfDescription[\"dataType\"] = myDF.dtypes\r\n return dfDescription\r\n\r\n\r\ndef names(myDF):\r\n '''\r\n Get the names and indices of the features in the Data Frame\r\n '''\r\n \r\n index = list(range(0,len(myDF.columns),1))\r\n name = list(myDF.columns.values)\r\n nameDF = np.column_stack([index,name])#pd.DataFrame({index,name})\r\n return nameDF",
"<docstring token>\nimport pandas as pd\nimport numpy as np\n\n\ndef describe_features_one_by_one(myDF):\n \"\"\"\n Description of each feature of a dataframe one by one.\n \"\"\"\n print('Dimension of this data frame', myDF.shape)\n print('----------------------------------------')\n var = 'go'\n for feature in myDF:\n if var != 'exit':\n print('----------------------------------------')\n print(myDF[feature].describe())\n print('----------------------------------------')\n var = input(\n 'Press any button to continue or write exit to finish \\n')\n else:\n break\n\n\ndef full_description(myDF):\n \"\"\"\n Full description of a DataFrame.\n Includying: basic statistics + number of missing values + data types\n \"\"\"\n dfDescription = myDF.describe(include='all').transpose()\n dfDescription['missingValues'] = myDF.isnull().sum()\n dfDescription['dataType'] = myDF.dtypes\n return dfDescription\n\n\ndef names(myDF):\n \"\"\"\n Get the names and indices of the features in the Data Frame\n \"\"\"\n index = list(range(0, len(myDF.columns), 1))\n name = list(myDF.columns.values)\n nameDF = np.column_stack([index, name])\n return nameDF\n",
"<docstring token>\n<import token>\n\n\ndef describe_features_one_by_one(myDF):\n \"\"\"\n Description of each feature of a dataframe one by one.\n \"\"\"\n print('Dimension of this data frame', myDF.shape)\n print('----------------------------------------')\n var = 'go'\n for feature in myDF:\n if var != 'exit':\n print('----------------------------------------')\n print(myDF[feature].describe())\n print('----------------------------------------')\n var = input(\n 'Press any button to continue or write exit to finish \\n')\n else:\n break\n\n\ndef full_description(myDF):\n \"\"\"\n Full description of a DataFrame.\n Includying: basic statistics + number of missing values + data types\n \"\"\"\n dfDescription = myDF.describe(include='all').transpose()\n dfDescription['missingValues'] = myDF.isnull().sum()\n dfDescription['dataType'] = myDF.dtypes\n return dfDescription\n\n\ndef names(myDF):\n \"\"\"\n Get the names and indices of the features in the Data Frame\n \"\"\"\n index = list(range(0, len(myDF.columns), 1))\n name = list(myDF.columns.values)\n nameDF = np.column_stack([index, name])\n return nameDF\n",
"<docstring token>\n<import token>\n<function token>\n\n\ndef full_description(myDF):\n \"\"\"\n Full description of a DataFrame.\n Includying: basic statistics + number of missing values + data types\n \"\"\"\n dfDescription = myDF.describe(include='all').transpose()\n dfDescription['missingValues'] = myDF.isnull().sum()\n dfDescription['dataType'] = myDF.dtypes\n return dfDescription\n\n\ndef names(myDF):\n \"\"\"\n Get the names and indices of the features in the Data Frame\n \"\"\"\n index = list(range(0, len(myDF.columns), 1))\n name = list(myDF.columns.values)\n nameDF = np.column_stack([index, name])\n return nameDF\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\ndef names(myDF):\n \"\"\"\n Get the names and indices of the features in the Data Frame\n \"\"\"\n index = list(range(0, len(myDF.columns), 1))\n name = list(myDF.columns.values)\n nameDF = np.column_stack([index, name])\n return nameDF\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n"
] | false |
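A quick usage sketch for full_description on a toy frame (the data is illustrative only):

import pandas as pd

df = pd.DataFrame({"x": [1.0, 2.0, None], "y": ["a", "b", "b"]})
print(full_description(df))
# one row per column: the describe() stats plus missingValues and dataType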
99,503 |
65496cd26d23bb9fa975eb0b87096daaf5685bc3
|
import numpy as np
import sys
sys.path.append('/usr/local/lib/python2.7/site-packages')
import cv2
import matplotlib.pyplot as plt
import time
def convertToRGB(img):
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

haar_face_cascade = cv2.CascadeClassifier('/Users/Vicky/Dropbox/Projects/WebCamVideoFaceDetector/classifiers/haarcascade_frontalface_alt.xml')
img = cv2.imread('/Users/Vicky/Dropbox/Projects/WebCamVideoFaceDetector/baby.png')
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect faces
    faces = haar_face_cascade.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
# print(faces)
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 3)
plt.imshow(img)
|
[
"import numpy as np\nimport sys\nsys.path.append('/usr/local/lib/python2.7/site-packages')\nimport cv2\nimport matplotlib.pyplot as plt\nimport time\n\n\ndef convertToRGB(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n\nhaar_face_cascade = cv2.CascadeClassifier('/Users/Vicky/Dropbox/Projects/WebCamVideoFaceDetector/classifiers/haarcascade_frontalface_alt.xml')\nimg = cv2.imread('/Users/Vicky/Dropbox/Projects/WebCamVideoFaceDetector/baby.png')\n\ncap = cv2.VideoCapture(0)\n\nwhile(True):\n ret, frame = cap.read()\n gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n #Detect faces\n faces = haar_face_cascade.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=5);\n \n for (x,y,w,h) in faces:\n cv2.rectangle(frame, (x,y), (x+w,y+h), (255,0,0), 2)\n \n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n\n \n\n\n\n \n#print((faces))\n\nfor (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x+w, y+h), (255, 255, 255), 3)\n\nplt.imshow(img)",
"import numpy as np\nimport sys\nsys.path.append('/usr/local/lib/python2.7/site-packages')\nimport cv2\nimport matplotlib.pyplot as plt\nimport time\n\n\ndef convertToRGB(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n\nhaar_face_cascade = cv2.CascadeClassifier(\n '/Users/Vicky/Dropbox/Projects/WebCamVideoFaceDetector/classifiers/haarcascade_frontalface_alt.xml'\n )\nimg = cv2.imread(\n '/Users/Vicky/Dropbox/Projects/WebCamVideoFaceDetector/baby.png')\ncap = cv2.VideoCapture(0)\nwhile True:\n ret, frame = cap.read()\n gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = haar_face_cascade.detectMultiScale(gray_img, scaleFactor=1.1,\n minNeighbors=5)\n for x, y, w, h in faces:\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\nfor x, y, w, h in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 3)\nplt.imshow(img)\n",
"<import token>\nsys.path.append('/usr/local/lib/python2.7/site-packages')\n<import token>\n\n\ndef convertToRGB(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n\nhaar_face_cascade = cv2.CascadeClassifier(\n '/Users/Vicky/Dropbox/Projects/WebCamVideoFaceDetector/classifiers/haarcascade_frontalface_alt.xml'\n )\nimg = cv2.imread(\n '/Users/Vicky/Dropbox/Projects/WebCamVideoFaceDetector/baby.png')\ncap = cv2.VideoCapture(0)\nwhile True:\n ret, frame = cap.read()\n gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = haar_face_cascade.detectMultiScale(gray_img, scaleFactor=1.1,\n minNeighbors=5)\n for x, y, w, h in faces:\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\nfor x, y, w, h in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 3)\nplt.imshow(img)\n",
"<import token>\nsys.path.append('/usr/local/lib/python2.7/site-packages')\n<import token>\n\n\ndef convertToRGB(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n\n<assignment token>\nwhile True:\n ret, frame = cap.read()\n gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = haar_face_cascade.detectMultiScale(gray_img, scaleFactor=1.1,\n minNeighbors=5)\n for x, y, w, h in faces:\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\nfor x, y, w, h in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 3)\nplt.imshow(img)\n",
"<import token>\n<code token>\n<import token>\n\n\ndef convertToRGB(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
99,504 |
d3915d2fbe4eeea11588759053773396c8ccb77c
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 15:43:21 2019
@author: Tang
"""
'''
Problem description
There is an infinitely large full binary tree whose nodes are numbered level by
level from left to right, with the root numbered 1. Given two nodes a and b,
design an algorithm that finds the number of their lowest common ancestor.
Given two ints a and b, the numbers of the given nodes, return the number of
the lowest common ancestor of a and b. Note that a node may also be counted as
its own ancestor.
Sample test case:
2,3
returns: 1
'''
def solution(a,b):
    while a!=b:
        if a>b:
            a/=2
        elif a<b:
            b/=2
        else:
            break
    return a
|
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 6 15:43:21 2019\n\n@author: Tang\n\"\"\"\n\n'''\n题目描述\n\n\n\n有一棵无穷大的满二叉树,其结点按根结点一层一层地从左往右依次编号,根结点编号为1。现在有两个结点a,b。\n请设计一个算法,求出a和b点的最近公共祖先的编号。\n\n给定两个int a,b。为给定结点的编号。请返回a和b的最近公共祖先的编号。注意这里结点本身也可认为是其祖先。\n\n测试样例:\n2,3\n返回:1\n'''\ndef solution(a,b):\n while a!=b:\n if a>b:\n a/=2\n elif a<b:\n b/=2\n else:\n break\n return a",
"<docstring token>\n\n\ndef solution(a, b):\n while a != b:\n if a > b:\n a /= 2\n elif a < b:\n b /= 2\n else:\n break\n return a\n",
"<docstring token>\n<function token>\n"
] | false |
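One caveat with the cell above: `a /= 2` is floor division only under Python 2. Under Python 3 it produces floats and the loop walks past the true ancestor. A minimal Python 3 sketch using integer division:

def solution(a, b):
    # In level-order numbering of a full binary tree, the parent of node n
    # is n // 2, so halve the larger index until the two paths meet.
    while a != b:
        if a > b:
            a //= 2
        else:
            b //= 2
    return a

print(solution(2, 3))  # 1, matching the sample test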
99,505 |
093d0199dc708d67583a277020b4184ec22f4eb2
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 17:26:41 2019
@author: murraymorrison
"""
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from train import fetch_team_stats
import pandas as pd
import numpy as np
import matplotlib.pyplot  # load the pyplot submodule
import matplotlib as plt  # accessed below as plt.pyplot
# data = data values of game info
# num_components = number of principal components
# returns the explained variance and a new data frame with the principal
# components of the team data extracted
def run_pca(data, num_components):
    data_vals = data
    target_index = len(data_vals[0]) - 2
    team_1 = fetch_team_stats(data_vals[:, 1])
    team_2 = fetch_team_stats(data_vals[:, 2])
    x_data = np.concatenate((team_1, team_2), axis=1)
    y_data = data_vals[:, target_index].astype("int")
    y_df = pd.DataFrame(data=y_data)
    y_df.rename(columns={0: 'target'}, inplace=True)
    x_data = StandardScaler().fit_transform(x_data)
    lol_pca = PCA(n_components=num_components)
    principal_components = lol_pca.fit_transform(x_data)
    principal_df = pd.DataFrame(data=principal_components)
    finalDf = pd.concat([principal_df, y_df], axis=1)
    ex_var = sum(lol_pca.explained_variance_ratio_)
    for i in range(1, 1 + num_components):
        print('Principal Component ' + str(i) + ' explains ' + str(lol_pca.explained_variance_ratio_[i - 1]) + ' of the total variance')
    print()
    print('Total Explained Variance: ' + str(ex_var) + ' for ' + str(num_components) + ' principal components')
    return ex_var, finalDf

def pca_plot():
    # manual_args and prep_data are assumed to be defined elsewhere in the project
    args = manual_args()
    train, test = prep_data(args)
    complete_data = np.concatenate((train, test), axis=0)
    ex_var_list = []
    number_components = range(11)
    for i in number_components:
        ex_var, reduced_df = run_pca(complete_data, i)
        ex_var_list.append(ex_var)
    plt.pyplot.xlabel('Number of Components')
    plt.pyplot.ylabel('Explained Variance')
    plt.pyplot.title('Principal Component Variance Explained')
    plt.pyplot.plot(number_components, ex_var_list)
    plt.pyplot.savefig('pca.png')
|
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 11 17:26:41 2019\n\n@author: murraymorrison\n\"\"\"\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler \nfrom train import fetch_team_stats\n\nimport pandas as pd\nimport numpy as np\n\n#data = data values of game info\n#num_components = number of principle components\n#returns new data frame with principle components of the team data extracted\ndef run_pca(data, num_components):\n \n data_vals = data\n \n target_index = len(data_vals[0])-2\n \n \n \n team_1 = fetch_team_stats(data_vals[:,1])\n team_2 = fetch_team_stats(data_vals[:,2])\n \n x_data = np.concatenate((team_1, team_2), axis=1) \n y_data = data_vals[:,target_index].astype(\"int\")\n \n \n y_df = pd.DataFrame(data = y_data)\n \n \n y_df.rename(columns = {0:'target'},inplace = True)\n \n x_data = StandardScaler().fit_transform(x_data)\n \n lol_pca = PCA(n_components=num_components)\n \n principal_components = lol_pca.fit_transform(x_data)\n \n \n \n principal_df = pd.DataFrame(data = principal_components)\n \n finalDf = pd.concat([principal_df, y_df], axis = 1)\n \n ex_var = sum(lol_pca.explained_variance_ratio_)\n for i in range(1,1+ num_components):\n print('Principle Component ' + str(i) + ' explains ' + str(lol_pca.explained_variance_ratio_[i-1])+' of the total variance')\n \n print() \n print('Total Explained Variance: ' + str(ex_var)+' for '+str(num_components)+' principle components')\n \n return ex_var,finalDf\n\n\n\n\ndef pca_plot():\n \n args = manual_args()\n \n train, test = prep_data(args)\n complete_data = np.concatenate((train,test),axis =0)\n \n ex_var_list = []\n \n number_components = range(11)\n \n for i in number_components:\n ex_var, reduced_df = run_pca(complete_data,i)\n ex_var_list.append(ex_var)\n \n plt.pyplot.xlabel('Number of Components')\n plt.pyplot.ylabel('Explained Variance')\n plt.pyplot.title('Principal Component Variance Explained')\n \n plt.pyplot.plot(number_components,ex_var_list)\n plt.pyplot.savefig('pca.png')\n \n",
"<docstring token>\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nfrom train import fetch_team_stats\nimport pandas as pd\nimport numpy as np\n\n\ndef run_pca(data, num_components):\n data_vals = data\n target_index = len(data_vals[0]) - 2\n team_1 = fetch_team_stats(data_vals[:, 1])\n team_2 = fetch_team_stats(data_vals[:, 2])\n x_data = np.concatenate((team_1, team_2), axis=1)\n y_data = data_vals[:, target_index].astype('int')\n y_df = pd.DataFrame(data=y_data)\n y_df.rename(columns={(0): 'target'}, inplace=True)\n x_data = StandardScaler().fit_transform(x_data)\n lol_pca = PCA(n_components=num_components)\n principal_components = lol_pca.fit_transform(x_data)\n principal_df = pd.DataFrame(data=principal_components)\n finalDf = pd.concat([principal_df, y_df], axis=1)\n ex_var = sum(lol_pca.explained_variance_ratio_)\n for i in range(1, 1 + num_components):\n print('Principle Component ' + str(i) + ' explains ' + str(lol_pca.\n explained_variance_ratio_[i - 1]) + ' of the total variance')\n print()\n print('Total Explained Variance: ' + str(ex_var) + ' for ' + str(\n num_components) + ' principle components')\n return ex_var, finalDf\n\n\ndef pca_plot():\n args = manual_args()\n train, test = prep_data(args)\n complete_data = np.concatenate((train, test), axis=0)\n ex_var_list = []\n number_components = range(11)\n for i in number_components:\n ex_var, reduced_df = run_pca(complete_data, i)\n ex_var_list.append(ex_var)\n plt.pyplot.xlabel('Number of Components')\n plt.pyplot.ylabel('Explained Variance')\n plt.pyplot.title('Principal Component Variance Explained')\n plt.pyplot.plot(number_components, ex_var_list)\n plt.pyplot.savefig('pca.png')\n",
"<docstring token>\n<import token>\n\n\ndef run_pca(data, num_components):\n data_vals = data\n target_index = len(data_vals[0]) - 2\n team_1 = fetch_team_stats(data_vals[:, 1])\n team_2 = fetch_team_stats(data_vals[:, 2])\n x_data = np.concatenate((team_1, team_2), axis=1)\n y_data = data_vals[:, target_index].astype('int')\n y_df = pd.DataFrame(data=y_data)\n y_df.rename(columns={(0): 'target'}, inplace=True)\n x_data = StandardScaler().fit_transform(x_data)\n lol_pca = PCA(n_components=num_components)\n principal_components = lol_pca.fit_transform(x_data)\n principal_df = pd.DataFrame(data=principal_components)\n finalDf = pd.concat([principal_df, y_df], axis=1)\n ex_var = sum(lol_pca.explained_variance_ratio_)\n for i in range(1, 1 + num_components):\n print('Principle Component ' + str(i) + ' explains ' + str(lol_pca.\n explained_variance_ratio_[i - 1]) + ' of the total variance')\n print()\n print('Total Explained Variance: ' + str(ex_var) + ' for ' + str(\n num_components) + ' principle components')\n return ex_var, finalDf\n\n\ndef pca_plot():\n args = manual_args()\n train, test = prep_data(args)\n complete_data = np.concatenate((train, test), axis=0)\n ex_var_list = []\n number_components = range(11)\n for i in number_components:\n ex_var, reduced_df = run_pca(complete_data, i)\n ex_var_list.append(ex_var)\n plt.pyplot.xlabel('Number of Components')\n plt.pyplot.ylabel('Explained Variance')\n plt.pyplot.title('Principal Component Variance Explained')\n plt.pyplot.plot(number_components, ex_var_list)\n plt.pyplot.savefig('pca.png')\n",
"<docstring token>\n<import token>\n<function token>\n\n\ndef pca_plot():\n args = manual_args()\n train, test = prep_data(args)\n complete_data = np.concatenate((train, test), axis=0)\n ex_var_list = []\n number_components = range(11)\n for i in number_components:\n ex_var, reduced_df = run_pca(complete_data, i)\n ex_var_list.append(ex_var)\n plt.pyplot.xlabel('Number of Components')\n plt.pyplot.ylabel('Explained Variance')\n plt.pyplot.title('Principal Component Variance Explained')\n plt.pyplot.plot(number_components, ex_var_list)\n plt.pyplot.savefig('pca.png')\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n"
] | false |
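The explained-variance bookkeeping in run_pca is the standard sklearn pattern; a self-contained sketch on random stand-in data (the 10-column matrix is a placeholder for the real team stats):

import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

X = StandardScaler().fit_transform(np.random.rand(200, 10))  # stand-in data
pca = PCA(n_components=5).fit(X)
for i, r in enumerate(pca.explained_variance_ratio_, start=1):
    print('Principal Component', i, 'explains', r, 'of the total variance')
print('Total Explained Variance:', pca.explained_variance_ratio_.sum())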
99,506 |
08a9efbeb925efc210e2cb9f2a0a9a1ff42339c1
|
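# For each test case: read n strings, intersect their character sets
# cumulatively, and print the number of distinct characters common to all.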
for _ in range(int(input())):
    n = int(input())
    a = []
    for i in range(n):
        a.append(input())
    for i in range(1, n):
        a[i] = set(a[i]).intersection(set(a[i-1]))
    print(len(a[-1]))
|
[
"for _ in range(int(input())):\n n=int(input())\n a=[]\n for i in range(n):\n a.append(input())\n for i in range(1,n):\n a[i]=set(a[i]).intersection(set(a[i-1]))\n print(len(a[-1]))\n",
"for _ in range(int(input())):\n n = int(input())\n a = []\n for i in range(n):\n a.append(input())\n for i in range(1, n):\n a[i] = set(a[i]).intersection(set(a[i - 1]))\n print(len(a[-1]))\n",
"<code token>\n"
] | false |
99,507 |
395c0b7fab581f7ea0cae7901f6689e19f48145d
|
def maximum(iterable):
    """Returns the maximum of a list or tuple of ints if there is no error, else returns -999"""
    max_item = 0
    if type(iterable) == list or type(iterable) == tuple:
        for i in iterable:
            if type(i) == int:
                if max_item < i:
                    max_item = i
            else:
                max_item = -999
                break
    else:
        max_item = -999
    return max_item

a = [12, 3, 4, 5, 56]
if maximum(a) != -999:
    print("There is no error")
else:
    print("Error")
|
[
"def maximum(iterable):\n \"\"\"Returns the maximum of a string if no error else returns -999\"\"\"\n max_item = 0\n if type(iterable) == list or type(iterable) == tuple:\n for i in iterable:\n if type(i)==int:\n if max_item<i:\n max_item = i\n else:\n max_item = -999\n break\n else:\n\n max_item=-999\n return max_item\n\n\na = [12,3,4,5,56]\nif maximum(a) != -999:\n print(\"There is no error\")\nelse:\n print(\"Error\")\n\n\n\n",
"def maximum(iterable):\n \"\"\"Returns the maximum of a string if no error else returns -999\"\"\"\n max_item = 0\n if type(iterable) == list or type(iterable) == tuple:\n for i in iterable:\n if type(i) == int:\n if max_item < i:\n max_item = i\n else:\n max_item = -999\n break\n else:\n max_item = -999\n return max_item\n\n\na = [12, 3, 4, 5, 56]\nif maximum(a) != -999:\n print('There is no error')\nelse:\n print('Error')\n",
"def maximum(iterable):\n \"\"\"Returns the maximum of a string if no error else returns -999\"\"\"\n max_item = 0\n if type(iterable) == list or type(iterable) == tuple:\n for i in iterable:\n if type(i) == int:\n if max_item < i:\n max_item = i\n else:\n max_item = -999\n break\n else:\n max_item = -999\n return max_item\n\n\n<assignment token>\nif maximum(a) != -999:\n print('There is no error')\nelse:\n print('Error')\n",
"def maximum(iterable):\n \"\"\"Returns the maximum of a string if no error else returns -999\"\"\"\n max_item = 0\n if type(iterable) == list or type(iterable) == tuple:\n for i in iterable:\n if type(i) == int:\n if max_item < i:\n max_item = i\n else:\n max_item = -999\n break\n else:\n max_item = -999\n return max_item\n\n\n<assignment token>\n<code token>\n",
"<function token>\n<assignment token>\n<code token>\n"
] | false |
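Note that seeding max_item with 0 means an all-negative list such as [-3, -1] comes back as 0 rather than -1. A sketch of the same guarded maximum built on the max builtin, which avoids that edge case (illustrative, not the original exercise's code):

def maximum(iterable):
    """Return the max of a list/tuple of ints, or -999 on any bad input."""
    if not isinstance(iterable, (list, tuple)):
        return -999
    if not all(isinstance(i, int) for i in iterable):
        return -999
    return max(iterable, default=-999)  # default covers the empty sequence

print(maximum([12, 3, 4, 5, 56]))  # 56
print(maximum([-3, -1]))           # -1, where the version above returns 0
print(maximum("not a list"))       # -999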
99,508 |
13d0c64d0779f823551deb5a570e5cfb88525e58
|
ITEM: TIMESTEP
6500
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
4.9685525364850847e-01 4.6703144746345622e+01
4.9685525364850847e-01 4.6703144746345622e+01
4.9685525364850847e-01 4.6703144746345622e+01
ITEM: ATOMS id type xs ys zs
8 1 0.124027 0.0605757 0.062418
35 1 0.0614849 0.121145 0.0650287
130 1 0.0645043 0.0582864 0.120941
165 1 0.130173 0.122103 0.127058
161 1 1.00136 0.115003 0.128503
4 1 0.996325 0.0662328 0.0571884
1565 1 0.870323 0.495485 0.49821
1413 1 0.127806 0.499275 0.375213
12 1 0.252837 0.0578218 0.0634762
39 1 0.190508 0.118691 0.0639576
43 1 0.310843 0.121484 0.0561942
134 1 0.18167 0.0589925 0.128465
138 1 0.319261 0.062565 0.131304
169 1 0.250336 0.117968 0.128686
277 1 0.618997 0.00398747 0.252927
1157 1 0.122789 0.499935 0.116823
133 1 0.125913 -0.00272777 0.12472
58 1 0.814673 0.192289 0.00254142
275 1 0.557807 0.00325537 0.313229
1183 1 0.943285 0.493774 0.183806
16 1 0.375018 0.0582813 0.0577936
47 1 0.434503 0.123485 0.0638688
142 1 0.427413 0.0615475 0.123744
173 1 0.372169 0.125608 0.111977
20 1 0.492284 0.0593561 0.0608375
177 1 0.496252 0.119442 0.123938
93 1 0.874942 0.250757 0.0010056
411 1 0.811879 0.00332744 0.439538
117 1 0.625717 0.374268 0.000258183
24 1 0.625516 0.0656289 0.0564604
51 1 0.564969 0.125251 0.0668679
146 1 0.5733 0.0606442 0.125472
181 1 0.627221 0.12984 0.128317
405 1 0.620858 0.003977 0.383241
15 1 0.435702 0.00215418 0.0630545
387 1 0.0638477 -0.00175607 0.441134
28 1 0.751636 0.0633116 0.0595221
55 1 0.684992 0.125645 0.0641966
59 1 0.814003 0.124356 0.0628433
150 1 0.686925 0.0699205 0.11995
154 1 0.805352 0.0633346 0.13076
185 1 0.746029 0.125353 0.135949
484 1 0.00171759 0.442589 0.438175
74 1 0.31012 0.307245 0.00299423
509 1 0.878912 0.376288 0.376782
32 1 0.874877 0.0620149 0.059696
63 1 0.932534 0.124042 0.0588462
158 1 0.939869 0.0608514 0.12221
189 1 0.875234 0.1174 0.121355
143 1 0.430621 0.00466889 0.189029
1177 1 0.75204 0.501446 0.133197
529 1 0.495458 -0.00226026 0.50253
1557 1 0.629923 0.500215 0.499978
40 1 0.136528 0.180503 0.0644352
67 1 0.0658326 0.248693 0.0655847
72 1 0.131755 0.308875 0.060752
162 1 0.0675224 0.189568 0.117399
194 1 0.064514 0.304757 0.129507
197 1 0.133687 0.246392 0.124427
193 1 0.996525 0.245634 0.119422
36 1 0.995317 0.181659 0.0661975
1291 1 0.318706 0.498354 0.323872
44 1 0.250883 0.176513 0.0711533
71 1 0.191047 0.242423 0.0633227
75 1 0.31185 0.248532 0.0631177
76 1 0.244812 0.306949 0.0711252
166 1 0.190707 0.179176 0.129502
170 1 0.307854 0.178417 0.135041
198 1 0.18714 0.31414 0.131208
201 1 0.247739 0.241865 0.12682
202 1 0.302465 0.309016 0.134541
1295 1 0.445437 0.500532 0.317285
48 1 0.368525 0.18207 0.0619188
79 1 0.422121 0.243006 0.0638909
80 1 0.37014 0.31818 0.0674595
174 1 0.430015 0.181384 0.124454
205 1 0.370911 0.248145 0.127913
206 1 0.432924 0.312347 0.133316
1427 1 0.565654 0.499866 0.442012
613 1 0.11537 0.371701 0.498388
84 1 0.493524 0.307556 0.0687192
52 1 0.492688 0.192069 0.0693322
209 1 0.486029 0.246221 0.12885
56 1 0.620968 0.192495 0.0616076
83 1 0.553884 0.248284 0.0659291
88 1 0.61996 0.306062 0.0615521
178 1 0.558423 0.177515 0.131433
210 1 0.564663 0.312841 0.126163
213 1 0.624975 0.245988 0.128908
157 1 0.878391 -0.00589931 0.121414
60 1 0.750646 0.187359 0.0575136
87 1 0.687928 0.250715 0.0637152
91 1 0.815725 0.252044 0.066574
92 1 0.74598 0.316699 0.0557074
182 1 0.68818 0.191972 0.127295
186 1 0.812809 0.183343 0.127641
214 1 0.682622 0.316223 0.123406
217 1 0.749495 0.247396 0.12145
218 1 0.815788 0.31112 0.122137
81 1 0.491892 0.248407 0.00882873
90 1 0.810774 0.311417 0.000868988
50 1 0.555727 0.188245 -0.00247212
68 1 1.00389 0.311877 0.065874
64 1 0.870664 0.183789 0.0624047
95 1 0.939224 0.25651 0.064857
96 1 0.874704 0.313049 0.0687962
190 1 0.932282 0.181748 0.123478
221 1 0.87772 0.247722 0.126263
222 1 0.935708 0.318695 0.131287
99 1 0.0696689 0.372862 0.0664065
104 1 0.131305 0.435554 0.0588486
226 1 0.0664863 0.430828 0.125965
229 1 0.124798 0.371951 0.130198
100 1 0.997314 0.431954 0.0680139
225 1 -0.00190256 0.37467 0.130167
510 1 0.944098 0.442331 0.378386
1041 1 0.505275 0.50067 0.00114927
97 1 0.00172254 0.37153 0.00742765
578 1 0.0544702 0.319196 0.495246
511 1 0.942042 0.377171 0.432706
103 1 0.195137 0.37578 0.0567009
107 1 0.310282 0.377196 0.0686541
108 1 0.250182 0.439657 0.0675703
230 1 0.180525 0.435806 0.127331
233 1 0.246205 0.375471 0.126651
234 1 0.309776 0.436098 0.129058
554 1 0.309855 0.178932 0.493457
585 1 0.249927 0.24732 0.49875
7 1 0.189044 0.000690827 0.0631238
512 1 0.870766 0.438943 0.43906
111 1 0.440811 0.372327 0.0648447
112 1 0.371813 0.444238 0.0624082
237 1 0.375517 0.371081 0.134109
238 1 0.430934 0.438851 0.121749
241 1 0.498573 0.367865 0.126547
409 1 0.747653 0.00888879 0.369429
598 1 0.692484 0.311376 0.498251
1431 1 0.687969 0.49723 0.438761
502 1 0.684855 0.436524 0.378128
633 1 0.753741 0.374694 0.497921
116 1 0.501033 0.431408 0.0652212
115 1 0.559635 0.370211 0.0665195
120 1 0.619962 0.441394 0.0653725
242 1 0.559135 0.438765 0.123515
245 1 0.618984 0.377004 0.118307
271 1 0.442346 0.00527261 0.310839
505 1 0.749432 0.372475 0.37712
508 1 0.75271 0.436338 0.432385
119 1 0.685608 0.375044 0.0621977
123 1 0.815916 0.376048 0.0683995
124 1 0.747576 0.446185 0.065936
246 1 0.686674 0.436805 0.122022
249 1 0.748657 0.379764 0.124147
250 1 0.814418 0.441908 0.127937
1043 1 0.560306 0.499064 0.0593482
557 1 0.365569 0.113592 0.495461
127 1 0.937828 0.376257 0.0711333
128 1 0.877317 0.440769 0.0624117
253 1 0.878352 0.369804 0.125212
254 1 0.934559 0.442893 0.121074
1025 1 0.999194 0.496348 0.00106045
614 1 0.179656 0.43412 0.501727
137 1 0.249616 0.00550752 0.129811
136 1 0.123268 0.0607639 0.190718
163 1 0.0628846 0.113558 0.186653
258 1 0.0658255 0.0624736 0.256695
264 1 0.12364 0.0606682 0.315599
291 1 0.0599016 0.121471 0.31657
293 1 0.121781 0.130973 0.24671
289 1 1.00159 0.120993 0.245513
260 1 -0.000472387 0.0639423 0.314381
132 1 0.000682919 0.0575405 0.192423
126 1 0.938948 0.428128 0.00857947
279 1 0.678366 0.00491174 0.319319
506 1 0.817322 0.436679 0.37126
27 1 0.817235 0.000450522 0.0683754
140 1 0.250379 0.0597291 0.187952
167 1 0.183992 0.117129 0.194752
171 1 0.311073 0.120223 0.190118
262 1 0.18475 0.0551277 0.252322
266 1 0.307051 0.0590576 0.251856
268 1 0.242403 0.0593037 0.312658
295 1 0.182415 0.121115 0.315735
297 1 0.25083 0.122748 0.254938
299 1 0.313755 0.115702 0.308522
1425 1 0.501417 0.499325 0.378082
144 1 0.374098 0.064756 0.196403
175 1 0.435512 0.122684 0.187457
270 1 0.435135 0.0668614 0.249018
272 1 0.374187 0.0548594 0.306823
301 1 0.376711 0.129133 0.255169
303 1 0.434387 0.131941 0.314953
276 1 0.498955 0.0665224 0.314225
148 1 0.499916 0.0595792 0.18794
503 1 0.686997 0.369418 0.443897
17 1 0.497508 0.00448316 -0.00558166
507 1 0.812275 0.380391 0.438211
113 1 0.497254 0.375713 0.00556926
305 1 0.499531 0.126634 0.252858
152 1 0.624497 0.0673638 0.189018
179 1 0.557925 0.120938 0.188599
274 1 0.558138 0.0658253 0.251003
280 1 0.620122 0.0682037 0.315529
307 1 0.56155 0.124764 0.308768
309 1 0.624358 0.128795 0.250428
23 1 0.684706 0.00636802 0.0594369
500 1 0.509937 0.433271 0.433719
156 1 0.743596 0.0629906 0.19936
183 1 0.688608 0.129086 0.195791
187 1 0.812634 0.123141 0.197569
278 1 0.678089 0.0631939 0.255429
282 1 0.811867 0.0592246 0.251986
284 1 0.745023 0.0677282 0.314747
311 1 0.683314 0.125062 0.318181
313 1 0.749327 0.130111 0.254661
315 1 0.809611 0.122172 0.31124
122 1 0.810712 0.441372 0.000493079
73 1 0.249941 0.242503 0.000514448
1433 1 0.752771 0.495223 0.369611
160 1 0.875401 0.0504209 0.188455
191 1 0.937905 0.120875 0.186442
286 1 0.94472 0.0567765 0.24683
288 1 0.875883 0.0617148 0.311749
317 1 0.883134 0.112723 0.252505
319 1 0.942168 0.121884 0.312002
168 1 0.115476 0.182756 0.184413
195 1 0.0649902 0.246654 0.191526
200 1 0.126594 0.308746 0.193061
290 1 0.058512 0.17972 0.244089
296 1 0.124301 0.178861 0.316615
322 1 0.0655303 0.309343 0.254661
323 1 0.0709781 0.241779 0.309312
325 1 0.12692 0.237948 0.251868
328 1 0.118211 0.311533 0.320929
292 1 0.996531 0.186329 0.302648
164 1 0.998181 0.180362 0.183193
172 1 0.244621 0.176577 0.193373
199 1 0.187637 0.245455 0.194802
203 1 0.304195 0.249678 0.187391
204 1 0.248967 0.317646 0.197785
294 1 0.187941 0.18056 0.253768
298 1 0.308857 0.186429 0.249586
300 1 0.247682 0.181296 0.316466
326 1 0.184492 0.311418 0.257699
327 1 0.184542 0.245521 0.316425
329 1 0.242035 0.24233 0.259707
330 1 0.306619 0.311528 0.251924
331 1 0.311807 0.252624 0.319602
332 1 0.248341 0.312579 0.316844
176 1 0.373497 0.184047 0.186061
207 1 0.42723 0.251668 0.190508
208 1 0.368531 0.308078 0.18682
302 1 0.428914 0.190226 0.248706
304 1 0.376583 0.194425 0.308203
333 1 0.36769 0.250006 0.253011
334 1 0.439226 0.312841 0.249097
335 1 0.441382 0.250684 0.320989
336 1 0.37543 0.309711 0.307327
308 1 0.501513 0.184651 0.312744
340 1 0.501616 0.30865 0.314456
212 1 0.500756 0.309201 0.190172
337 1 0.502262 0.241899 0.248972
180 1 0.493451 0.185159 0.188629
184 1 0.624368 0.193636 0.19182
211 1 0.563423 0.249957 0.184113
216 1 0.625519 0.310562 0.187969
306 1 0.560878 0.184517 0.254767
312 1 0.631143 0.18093 0.318663
338 1 0.559874 0.303418 0.249749
339 1 0.559683 0.241977 0.314251
341 1 0.623177 0.252116 0.253985
344 1 0.629875 0.308359 0.317041
188 1 0.752821 0.191241 0.186901
215 1 0.69559 0.252309 0.185479
219 1 0.819435 0.250797 0.191451
220 1 0.74763 0.313327 0.178131
310 1 0.686896 0.195398 0.251782
314 1 0.812754 0.192917 0.258555
316 1 0.752698 0.188965 0.315656
342 1 0.686564 0.313496 0.252078
343 1 0.689237 0.247685 0.309423
345 1 0.74892 0.249878 0.249197
346 1 0.811406 0.308188 0.25187
347 1 0.81875 0.250946 0.321581
348 1 0.755438 0.315777 0.316008
321 1 0.999606 0.248135 0.249935
324 1 0.993876 0.307738 0.312355
196 1 0.999007 0.307381 0.18941
192 1 0.87089 0.178101 0.189719
223 1 0.939118 0.248259 0.185047
224 1 0.883168 0.317696 0.190648
318 1 0.933993 0.189033 0.244628
320 1 0.87202 0.185341 0.307387
349 1 0.877421 0.251499 0.252975
350 1 0.940137 0.311507 0.252816
351 1 0.939859 0.24799 0.318805
352 1 0.874368 0.316834 0.317728
499 1 0.555184 0.365599 0.439897
227 1 0.0647456 0.371273 0.190329
232 1 0.124583 0.434478 0.189705
354 1 0.0669612 0.431787 0.254179
355 1 0.064788 0.377342 0.31271
357 1 0.131148 0.371076 0.257144
360 1 0.130613 0.439204 0.300775
228 1 0.00728271 0.44024 0.185099
19 1 0.566652 -0.00163079 0.058782
110 1 0.435717 0.438942 -0.000257669
525 1 0.360794 0.00575281 0.494624
231 1 0.187012 0.372214 0.196066
235 1 0.311317 0.37529 0.192315
236 1 0.251967 0.43683 0.193885
358 1 0.191557 0.44789 0.242394
359 1 0.18701 0.374869 0.319764
361 1 0.24928 0.373258 0.258267
362 1 0.313415 0.445651 0.262509
363 1 0.315602 0.37598 0.309315
364 1 0.245996 0.44047 0.310606
1429 1 0.624708 0.499103 0.371317
153 1 0.747639 -0.000631503 0.120315
239 1 0.44124 0.375757 0.187203
240 1 0.376954 0.443707 0.192774
365 1 0.374527 0.378328 0.248048
366 1 0.436453 0.43589 0.248598
367 1 0.439627 0.367749 0.314111
368 1 0.385527 0.433549 0.316943
497 1 0.499839 0.369221 0.375874
372 1 0.499826 0.436886 0.310235
244 1 0.503429 0.434361 0.186006
369 1 0.501923 0.370369 0.253136
243 1 0.558488 0.374983 0.189406
248 1 0.624827 0.430709 0.187476
370 1 0.56689 0.442478 0.250112
371 1 0.561149 0.374134 0.310788
373 1 0.625072 0.373715 0.251778
376 1 0.628733 0.432278 0.312873
247 1 0.683686 0.366734 0.188671
251 1 0.814362 0.366025 0.184953
252 1 0.74701 0.440539 0.185893
374 1 0.686176 0.43166 0.252046
375 1 0.687918 0.371188 0.316454
377 1 0.749434 0.370136 0.24922
378 1 0.816705 0.439087 0.256631
379 1 0.809076 0.375965 0.316562
380 1 0.745565 0.436077 0.309598
589 1 0.371753 0.250045 0.496157
353 1 0.00788098 0.367565 0.250467
356 1 0.99718 0.438745 0.315084
255 1 0.941326 0.382527 0.192346
256 1 0.873201 0.435747 0.195414
381 1 0.875707 0.376381 0.255791
382 1 0.940351 0.441406 0.252682
383 1 0.935774 0.379252 0.308786
384 1 0.879702 0.444468 0.318488
1421 1 0.387341 0.498342 0.381125
386 1 0.0609837 0.0565484 0.381766
392 1 0.128668 0.0669427 0.4367
419 1 0.0638796 0.128805 0.43778
421 1 0.120949 0.121984 0.379628
31 1 0.937082 0.00591135 0.0545817
610 1 0.0530512 0.43093 0.504601
131 1 0.0621833 0.00159992 0.183919
498 1 0.562614 0.435481 0.374076
390 1 0.187162 0.056546 0.375046
394 1 0.306984 0.0571942 0.370316
396 1 0.245012 0.0535329 0.437898
423 1 0.190406 0.124384 0.439035
425 1 0.243418 0.115585 0.376094
427 1 0.30392 0.112358 0.438195
1179 1 0.816692 0.497155 0.192362
389 1 0.124457 0.00111881 0.38541
609 1 0.997493 0.373117 0.50312
398 1 0.43167 0.0628864 0.364112
400 1 0.366753 0.057034 0.42991
429 1 0.372302 0.128624 0.374237
431 1 0.439989 0.127175 0.43792
433 1 0.501738 0.123174 0.376603
66 1 0.0650898 0.308205 0.00150124
1173 1 0.623262 0.498178 0.121873
569 1 0.748734 0.121966 0.491584
404 1 0.499252 0.0653222 0.445695
402 1 0.56683 0.0652 0.372242
408 1 0.62293 0.0617481 0.445155
435 1 0.560045 0.120778 0.437169
437 1 0.622114 0.122679 0.375228
407 1 0.681257 0.00286107 0.438156
1283 1 0.0658446 0.493303 0.312967
406 1 0.682015 0.0592976 0.380177
410 1 0.810901 0.0613096 0.374595
412 1 0.748909 0.0580794 0.433987
439 1 0.679618 0.126531 0.43126
441 1 0.746213 0.126221 0.374985
443 1 0.817721 0.130413 0.437919
125 1 0.869662 0.37589 0.000552369
501 1 0.624687 0.363253 0.377092
388 1 0.998338 0.0683009 0.439124
417 1 -0.00135206 0.13365 0.37019
414 1 0.936732 0.057311 0.375475
416 1 0.87422 0.0616343 0.434946
445 1 0.874607 0.126393 0.372443
447 1 0.93364 0.123683 0.440269
3 1 0.0657494 0.00227119 0.0580267
86 1 0.683824 0.308084 7.60568e-05
602 1 0.808504 0.314023 0.498755
418 1 0.064053 0.188313 0.380322
424 1 0.128178 0.181541 0.444005
450 1 0.0625978 0.31681 0.378572
451 1 0.0577866 0.250003 0.448774
453 1 0.119992 0.244312 0.383466
456 1 0.119953 0.311814 0.437327
415 1 0.940147 0.00475326 0.441377
399 1 0.432256 0.00593643 0.437519
1293 1 0.374643 0.501536 0.258921
1435 1 0.810988 0.495766 0.433425
422 1 0.185894 0.183326 0.37601
426 1 0.312306 0.190203 0.368122
428 1 0.24908 0.182075 0.426937
454 1 0.183928 0.310287 0.37589
455 1 0.182994 0.251669 0.440213
457 1 0.249526 0.25254 0.369494
458 1 0.311206 0.310221 0.368863
459 1 0.304608 0.245284 0.431679
460 1 0.246923 0.315473 0.430955
430 1 0.43626 0.183994 0.372971
432 1 0.368742 0.185317 0.439554
461 1 0.372236 0.247213 0.375983
462 1 0.428484 0.311182 0.375432
463 1 0.431572 0.248338 0.440379
464 1 0.369794 0.308766 0.434793
287 1 0.938772 0.00776434 0.311414
465 1 0.492889 0.251471 0.381942
468 1 0.499232 0.313551 0.445064
436 1 0.497359 0.188272 0.439065
434 1 0.562793 0.187729 0.373549
469 1 0.625915 0.245429 0.379721
472 1 0.62359 0.314789 0.442278
466 1 0.562194 0.310657 0.377684
440 1 0.621087 0.184626 0.435541
467 1 0.563074 0.244683 0.437228
1301 1 0.626887 0.503727 0.25245
473 1 0.752826 0.24702 0.374159
474 1 0.813076 0.316064 0.378058
444 1 0.748683 0.185082 0.435461
470 1 0.691333 0.305509 0.378587
475 1 0.813033 0.243049 0.436951
471 1 0.688243 0.248044 0.43095
442 1 0.812451 0.18549 0.370249
476 1 0.750596 0.307279 0.43913
438 1 0.686827 0.18495 0.376162
401 1 0.497018 0.00842888 0.377021
478 1 0.929586 0.312109 0.3773
480 1 0.872356 0.309202 0.437645
452 1 0.994892 0.310977 0.439209
477 1 0.876743 0.250502 0.375201
449 1 0.00282274 0.248486 0.371144
420 1 0.995044 0.192891 0.435616
479 1 0.931663 0.252385 0.441295
446 1 0.937824 0.18827 0.375533
448 1 0.88237 0.191146 0.434684
582 1 0.192244 0.311619 0.494016
488 1 0.122908 0.444159 0.444666
481 1 0.993727 0.366406 0.372701
485 1 0.12117 0.380009 0.377254
482 1 0.0507113 0.436993 0.371791
483 1 0.0602252 0.382038 0.433053
542 1 0.935626 0.0598473 0.500464
281 1 0.747592 0.00235886 0.255016
534 1 0.693851 0.0602757 0.502332
489 1 0.250237 0.377889 0.370053
490 1 0.311443 0.436374 0.378032
492 1 0.255675 0.440887 0.443732
486 1 0.191797 0.431766 0.380066
487 1 0.179041 0.376409 0.439087
491 1 0.304823 0.377773 0.436222
494 1 0.44491 0.43084 0.381673
495 1 0.434639 0.37199 0.441584
493 1 0.377579 0.381815 0.377202
496 1 0.371574 0.437599 0.439107
46 1 0.431633 0.182811 0.00216911
1163 1 0.309669 0.495882 0.187366
1169 1 0.497154 0.491335 0.123158
504 1 0.624316 0.437527 0.441794
558 1 0.430481 0.188045 0.501441
1309 1 0.87859 0.498554 0.250651
617 1 0.246097 0.372995 0.497134
1029 1 0.124414 0.50288 -0.00358049
1 1 -0.00208964 0.00314588 5.55231e-05
565 1 0.620448 0.125003 0.49347
621 1 0.367045 0.374817 0.502418
622 1 0.437126 0.436892 0.496016
109 1 0.371237 0.377199 0.0119068
586 1 0.30469 0.315295 0.495106
18 1 0.56148 0.062934 0.00186725
601 1 0.750852 0.242206 0.499288
118 1 0.68983 0.433984 0.000145475
537 1 0.747263 -0.00193983 0.497173
82 1 0.553537 0.3077 0.00519466
49 1 0.492124 0.124732 0.00823674
514 1 0.0673188 0.0635847 0.501523
57 1 0.754102 0.125314 -0.00173848
590 1 0.437072 0.309926 0.496419
94 1 0.939383 0.321701 0.0082238
65 1 0.000239016 0.247812 0.00379049
638 1 0.943796 0.44032 0.495812
62 1 0.940211 0.191425 0.00189741
1553 1 0.500397 0.495068 0.499819
54 1 0.689505 0.186635 0.0027856
520 1 0.127603 0.0622117 0.565183
547 1 0.0631131 0.122656 0.567881
642 1 0.0612492 0.0585906 0.625194
677 1 0.121076 0.121381 0.629607
785 1 0.496362 0.00841942 0.753455
516 1 -0.00154086 0.0592635 0.563651
593 1 0.504343 0.248209 0.501225
524 1 0.244303 0.05953 0.564971
551 1 0.186865 0.125324 0.574765
555 1 0.320306 0.12252 0.556392
646 1 0.183474 0.0656005 0.622654
650 1 0.310706 0.0562472 0.622457
681 1 0.259631 0.121921 0.62528
996 1 0.00167628 0.439425 0.931046
553 1 0.24621 0.120386 0.51071
915 1 0.557974 0.00493583 0.938331
528 1 0.37464 0.0496381 0.56262
559 1 0.436853 0.124361 0.561481
654 1 0.434082 0.0583116 0.62292
685 1 0.36903 0.116295 0.623792
532 1 0.492344 0.0651676 0.559476
1563 1 0.811827 0.500203 0.570691
526 1 0.427598 0.0548452 0.501905
993 1 0.998342 0.371902 0.87563
689 1 0.500413 0.126235 0.621008
536 1 0.621731 0.0619033 0.563691
563 1 0.565215 0.133656 0.56212
658 1 0.557306 0.066005 0.624396
693 1 0.628033 0.127984 0.620134
1695 1 0.937551 0.494861 0.681911
1951 1 0.938166 0.494317 0.944389
519 1 0.187692 -0.00378461 0.562053
921 1 0.750724 -0.00241393 0.871053
1543 1 0.192208 0.498408 0.565881
540 1 0.747358 0.0628483 0.56718
567 1 0.68912 0.126539 0.550381
571 1 0.803782 0.128605 0.566076
662 1 0.68198 0.0648257 0.62191
666 1 0.81684 0.05902 0.626529
697 1 0.749708 0.1227 0.631331
1945 1 0.754548 0.500444 0.87813
673 1 0.999209 0.12144 0.631371
544 1 0.870506 0.0624727 0.560337
575 1 0.938582 0.12108 0.562576
670 1 0.935185 0.0632801 0.626948
701 1 0.878793 0.119152 0.62505
626 1 0.560622 0.438164 0.504211
899 1 0.0632838 0.00400554 0.935061
1925 1 0.123052 0.494398 0.870594
561 1 0.499508 0.122711 0.501306
552 1 0.126782 0.183619 0.56491
579 1 0.0709448 0.24925 0.562507
584 1 0.125158 0.307272 0.56567
674 1 0.0582969 0.190661 0.634613
706 1 0.0600732 0.31309 0.624821
709 1 0.124207 0.251809 0.624289
580 1 0.00720022 0.316454 0.562972
556 1 0.249799 0.183595 0.562724
583 1 0.190532 0.247386 0.561695
587 1 0.31711 0.245321 0.560865
588 1 0.248698 0.303362 0.562334
678 1 0.190978 0.185961 0.624944
682 1 0.317604 0.189321 0.626988
710 1 0.188729 0.308784 0.62192
713 1 0.25373 0.243353 0.625458
714 1 0.314344 0.315304 0.615486
791 1 0.685033 0.00504165 0.814471
560 1 0.375206 0.194185 0.563436
591 1 0.44191 0.248749 0.561016
592 1 0.373701 0.310411 0.558331
686 1 0.442007 0.192721 0.622581
717 1 0.382213 0.252724 0.621751
718 1 0.431126 0.317189 0.619437
538 1 0.813723 0.0672602 0.500704
1935 1 0.448503 0.500175 0.946174
721 1 0.491651 0.258201 0.627727
564 1 0.503656 0.191373 0.560527
596 1 0.500402 0.311641 0.559814
568 1 0.629315 0.184952 0.564979
595 1 0.561062 0.253082 0.565631
600 1 0.625505 0.307957 0.560769
690 1 0.560744 0.190575 0.626375
722 1 0.561456 0.315625 0.618198
725 1 0.625799 0.24946 0.625492
789 1 0.61885 0.00734062 0.748956
570 1 0.814184 0.182703 0.501935
78 1 0.438216 0.312411 1.00304
665 1 0.752874 0.00261037 0.627112
572 1 0.741139 0.183178 0.567767
599 1 0.689218 0.248537 0.554228
603 1 0.810186 0.252611 0.567158
604 1 0.749551 0.311507 0.557736
694 1 0.687824 0.190824 0.626847
698 1 0.812493 0.183955 0.629791
726 1 0.68196 0.313941 0.617797
729 1 0.746262 0.24858 0.632379
730 1 0.808807 0.307248 0.629718
1941 1 0.62173 0.498254 0.873111
21 1 0.6269 0.00794969 0.99765
1691 1 0.820512 0.497396 0.690198
548 1 0.00283809 0.186438 0.565267
705 1 0.00166182 0.247193 0.61972
576 1 0.878626 0.178479 0.561095
607 1 0.93334 0.246663 0.563285
608 1 0.8735 0.309236 0.559815
702 1 0.942306 0.182017 0.625142
733 1 0.870793 0.248315 0.62683
734 1 0.933269 0.309199 0.617454
795 1 0.81502 -0.00372538 0.805631
550 1 0.193983 0.186786 0.497704
535 1 0.69046 -0.0013933 0.569231
797 1 0.876152 -0.0016839 0.747492
611 1 0.0658587 0.37678 0.56619
616 1 0.123608 0.438428 0.560012
738 1 0.0605858 0.442109 0.622499
741 1 0.125665 0.380708 0.628728
612 1 -0.00201522 0.45139 0.564285
659 1 0.563169 0.00488482 0.689962
615 1 0.187631 0.369107 0.557187
619 1 0.30964 0.37787 0.558239
620 1 0.248638 0.436224 0.555471
742 1 0.184851 0.437791 0.622634
745 1 0.252107 0.368738 0.620866
746 1 0.311497 0.441633 0.615892
5 1 0.123701 0.00128225 0.998166
515 1 0.0667217 -0.00113446 0.558097
1947 1 0.816705 0.497605 0.939352
1021 1 0.878178 0.376506 0.876378
574 1 0.939006 0.188574 0.501764
623 1 0.427269 0.372723 0.556906
624 1 0.377236 0.434798 0.555956
749 1 0.372709 0.382905 0.623983
750 1 0.438802 0.434067 0.619962
753 1 0.501703 0.374663 0.625742
1022 1 0.936269 0.44094 0.879778
628 1 0.500253 0.438758 0.563265
627 1 0.557913 0.373972 0.564758
632 1 0.623876 0.439315 0.562862
754 1 0.557261 0.437538 0.626436
757 1 0.624945 0.377232 0.613343
549 1 0.122665 0.121475 0.503447
1023 1 0.942046 0.3785 0.936659
773 1 0.117786 0.00280949 0.752645
518 1 0.19374 0.0597192 0.502231
543 1 0.937128 0.00433244 0.562993
1024 1 0.876867 0.4395 0.939638
631 1 0.693466 0.374216 0.552058
635 1 0.81103 0.377469 0.55938
636 1 0.752031 0.447252 0.569267
758 1 0.69138 0.436043 0.626076
761 1 0.753522 0.370389 0.615986
762 1 0.808161 0.427916 0.628068
783 1 0.437024 0.00631813 0.819447
1017 1 0.75582 0.377893 0.871675
77 1 0.368442 0.248179 0.992583
1673 1 0.255193 0.497036 0.634586
737 1 -0.000683403 0.371161 0.620184
639 1 0.94051 0.365966 0.559034
640 1 0.867998 0.438678 0.573516
765 1 0.86904 0.371939 0.621601
766 1 0.936201 0.442571 0.620366
629 1 0.623037 0.37395 0.50533
1020 1 0.752067 0.436598 0.93124
630 1 0.700135 0.440941 0.497401
648 1 0.120155 0.0577926 0.682725
675 1 0.0637022 0.11848 0.68308
770 1 0.0536175 0.0641295 0.750439
776 1 0.124563 0.0568845 0.816823
803 1 0.0652748 0.122108 0.809574
805 1 0.124519 0.118259 0.751227
644 1 0.000316782 0.0616673 0.687876
772 1 0.998933 0.0636057 0.816206
801 1 0.994468 0.120993 0.748596
566 1 0.68414 0.188227 0.493367
652 1 0.24534 0.0596629 0.681073
679 1 0.189654 0.12519 0.687626
683 1 0.320302 0.119718 0.685306
774 1 0.184292 0.054557 0.747083
778 1 0.318523 0.0639586 0.742903
780 1 0.249647 0.0622429 0.819955
807 1 0.19076 0.121932 0.807155
809 1 0.251315 0.111688 0.743087
811 1 0.305229 0.12876 0.802989
787 1 0.56948 0.00643767 0.816156
1018 1 0.820485 0.43397 0.877546
1019 1 0.814646 0.373112 0.940378
656 1 0.380375 0.0605101 0.694189
687 1 0.436873 0.119241 0.684645
782 1 0.438456 0.0685298 0.759778
784 1 0.369181 0.0643868 0.812417
813 1 0.382364 0.129549 0.747659
815 1 0.440441 0.124569 0.81517
660 1 0.502529 0.0627663 0.690641
817 1 0.498548 0.128215 0.750635
788 1 0.497884 0.0547505 0.81621
664 1 0.622049 0.0623682 0.686687
691 1 0.558903 0.129406 0.684176
786 1 0.567352 0.0720226 0.751426
792 1 0.623084 0.073201 0.814285
819 1 0.556047 0.127852 0.812775
821 1 0.624416 0.134034 0.746849
668 1 0.751951 0.0620383 0.687903
695 1 0.688146 0.126759 0.6915
699 1 0.809175 0.12223 0.688492
790 1 0.683264 0.0644907 0.749757
794 1 0.811762 0.0681347 0.744773
796 1 0.751536 0.0629866 0.814876
823 1 0.688292 0.125351 0.823006
825 1 0.753349 0.121559 0.755322
827 1 0.814152 0.130214 0.814513
1014 1 0.684854 0.445098 0.883555
925 1 0.876962 0.00532832 0.870794
672 1 0.87751 0.0642637 0.687256
703 1 0.939097 0.118981 0.690119
798 1 0.937548 0.0552422 0.749545
800 1 0.874579 0.0693019 0.81012
829 1 0.877353 0.127491 0.749418
831 1 0.93267 0.128688 0.810139
1015 1 0.69476 0.377185 0.935387
680 1 0.121815 0.181253 0.684949
707 1 0.0615525 0.254649 0.685667
712 1 0.127633 0.315216 0.67927
802 1 0.0637555 0.179529 0.741488
808 1 0.132177 0.180926 0.810246
834 1 0.0588117 0.312762 0.747991
835 1 0.0642956 0.247726 0.800214
837 1 0.119784 0.251042 0.740871
840 1 0.123244 0.30747 0.809097
836 1 0.00442914 0.308977 0.818143
833 1 -0.0019944 0.252427 0.744112
684 1 0.256917 0.181314 0.688716
711 1 0.188754 0.249324 0.682009
715 1 0.320504 0.251833 0.686852
716 1 0.24687 0.302731 0.678813
806 1 0.185231 0.188905 0.745401
810 1 0.31254 0.188351 0.74401
812 1 0.245631 0.181641 0.803816
838 1 0.185244 0.305934 0.747573
839 1 0.185921 0.2459 0.810034
841 1 0.250819 0.249768 0.743768
842 1 0.313684 0.315456 0.745562
843 1 0.310117 0.250342 0.80633
844 1 0.249355 0.30682 0.804394
688 1 0.382992 0.184084 0.686315
719 1 0.43226 0.259923 0.693419
720 1 0.370254 0.320976 0.675495
814 1 0.445222 0.193684 0.752124
816 1 0.385068 0.194863 0.810754
845 1 0.373339 0.249977 0.751897
846 1 0.436413 0.309889 0.757601
847 1 0.448823 0.251189 0.818405
848 1 0.370962 0.317178 0.813636
852 1 0.502829 0.316081 0.812388
692 1 0.497096 0.188589 0.683217
849 1 0.504237 0.251314 0.744328
820 1 0.501318 0.190049 0.80933
724 1 0.498796 0.316493 0.68729
696 1 0.624161 0.186618 0.683073
723 1 0.563865 0.258612 0.680991
728 1 0.627756 0.313735 0.681433
818 1 0.561833 0.191836 0.745889
824 1 0.622068 0.189323 0.811481
850 1 0.558511 0.318203 0.745292
851 1 0.55864 0.253735 0.81231
853 1 0.617945 0.256691 0.74442
856 1 0.623676 0.311111 0.816235
700 1 0.754792 0.189905 0.69397
727 1 0.687263 0.245744 0.689056
731 1 0.815438 0.251748 0.691801
732 1 0.746003 0.313519 0.685588
822 1 0.687412 0.18853 0.749446
826 1 0.809723 0.18277 0.752518
828 1 0.741971 0.189252 0.809104
854 1 0.686329 0.312948 0.758585
855 1 0.684697 0.249568 0.809436
857 1 0.749161 0.251547 0.747383
858 1 0.80714 0.309747 0.757056
859 1 0.80599 0.245727 0.809318
860 1 0.742447 0.311817 0.815294
804 1 0.00021206 0.181671 0.808946
676 1 0.994306 0.182305 0.690029
708 1 0.99355 0.316802 0.686558
704 1 0.87495 0.182712 0.684992
735 1 0.933608 0.249122 0.681879
736 1 0.873525 0.31552 0.68667
830 1 0.933099 0.187179 0.74404
832 1 0.872452 0.189503 0.810939
861 1 0.874332 0.25286 0.751128
862 1 0.939084 0.317276 0.745844
863 1 0.941088 0.248462 0.803749
864 1 0.881163 0.315105 0.808624
1939 1 0.56784 0.494767 0.939309
661 1 0.629206 0.000284212 0.626015
739 1 0.0634958 0.373409 0.686728
744 1 0.118183 0.434804 0.687567
866 1 0.0565719 0.437117 0.753045
867 1 0.0631191 0.369466 0.814366
869 1 0.121943 0.369332 0.750676
872 1 0.117711 0.432671 0.806187
[… remaining atom rows truncated by the viewer. This cell holds a LAMMPS-format dump snapshot; the recoverable header reads: `ITEM: TIMESTEP` 6500; `ITEM: NUMBER OF ATOMS` 2048; `ITEM: BOX BOUNDS pp pp pp` with bounds 4.9685525364850847e-01 to 4.6703144746345622e+01 on each axis; `ITEM: ATOMS id type xs ys zs`, followed by one line of scaled coordinates per atom.]
0.36717 0.81118\n880 1 0.373184 0.434557 0.815367\n756 1 0.495611 0.434432 0.694552\n1009 1 0.509159 0.372244 0.872123\n573 1 0.881129 0.124429 0.500221\n1013 1 0.622952 0.379424 0.879079\n881 1 0.495857 0.380925 0.752306\n884 1 0.498466 0.44055 0.809974\n755 1 0.55765 0.379183 0.689368\n760 1 0.62762 0.438597 0.683076\n882 1 0.563633 0.438727 0.749855\n883 1 0.572369 0.376996 0.813484\n885 1 0.626881 0.375285 0.747941\n888 1 0.6312 0.443931 0.812813\n1927 1 0.187905 0.498911 0.927404\n759 1 0.689386 0.377416 0.686492\n763 1 0.811495 0.369625 0.69107\n764 1 0.751898 0.4387 0.690858\n886 1 0.684949 0.433539 0.75321\n887 1 0.684322 0.380175 0.823718\n889 1 0.744435 0.37152 0.747372\n890 1 0.808618 0.434282 0.751355\n891 1 0.814936 0.372732 0.810319\n892 1 0.75317 0.434758 0.809734\n771 1 0.0579342 0.00158037 0.818693\n1011 1 0.556807 0.374401 0.939545\n6 1 0.194211 0.0585361 0.997102\n868 1 -0.00175922 0.433561 0.811232\n767 1 0.936813 0.381496 0.677128\n768 1 0.873536 0.432965 0.684153\n893 1 0.876724 0.371224 0.751717\n894 1 0.936704 0.434311 0.743943\n895 1 0.937469 0.378394 0.810826\n896 1 0.873664 0.430258 0.810701\n1559 1 0.685294 0.49437 0.566317\n898 1 0.0634425 0.0608777 0.877846\n904 1 0.134129 0.0687454 0.936198\n931 1 0.0696912 0.131068 0.935722\n933 1 0.127151 0.120767 0.878297\n929 1 0.00113905 0.118701 0.874649\n1815 1 0.693335 0.501989 0.804249\n1817 1 0.757993 0.496004 0.746983\n1821 1 0.879389 0.496596 0.758526\n902 1 0.188094 0.0545845 0.876739\n906 1 0.311474 0.060729 0.875751\n908 1 0.253096 0.0592502 0.929787\n935 1 0.192319 0.124408 0.935313\n937 1 0.247917 0.124758 0.873504\n939 1 0.310631 0.127854 0.93115\n2 1 0.0693448 0.0657866 0.997722\n1921 1 0.994884 0.4981 0.870296\n1010 1 0.567835 0.435131 0.877331\n919 1 0.688454 0.00200528 0.937589\n910 1 0.438939 0.0616252 0.879397\n912 1 0.367163 0.0580942 0.936758\n941 1 0.37601 0.127899 0.866906\n943 1 0.437075 0.125188 0.941024\n945 1 0.499472 0.123747 0.878396\n1937 1 0.510718 0.499177 0.875981\n1551 1 0.439707 0.500644 0.567488\n916 1 0.495988 0.0677352 0.940959\n914 1 0.555319 0.0726918 0.881527\n920 1 0.623167 0.0646293 0.933647\n947 1 0.55483 0.132204 0.944391\n949 1 0.614257 0.128257 0.875176\n1823 1 0.938662 0.493998 0.808685\n33 1 0.00534034 0.127826 0.99607\n1675 1 0.314862 0.497351 0.693751\n918 1 0.681055 0.0651056 0.87743\n922 1 0.816697 0.058888 0.873222\n924 1 0.744662 0.0590855 0.927553\n951 1 0.686697 0.136973 0.935063\n953 1 0.753411 0.128174 0.875349\n955 1 0.811083 0.127352 0.942198\n900 1 0.0021816 0.0621387 0.935322\n926 1 0.933342 0.0624533 0.863589\n928 1 0.877257 0.067596 0.932922\n957 1 0.869383 0.126124 0.873544\n959 1 0.941158 0.129203 0.935072\n930 1 0.0647401 0.177885 0.867333\n936 1 0.130544 0.184269 0.937499\n962 1 0.0663068 0.305328 0.881813\n963 1 0.0658943 0.244489 0.935669\n965 1 0.126717 0.242035 0.873621\n968 1 0.129481 0.308276 0.937862\n964 1 -0.000583541 0.311037 0.94027\n961 1 0.99434 0.250937 0.874332\n932 1 0.997776 0.197939 0.937543\n1016 1 0.628307 0.43362 0.95018\n1687 1 0.693417 0.501492 0.691245\n663 1 0.686161 0.00401678 0.690738\n114 1 0.570504 0.437335 1.001\n934 1 0.186298 0.186776 0.875543\n938 1 0.311071 0.193072 0.863087\n940 1 0.250332 0.186375 0.937818\n966 1 0.181575 0.306553 0.871559\n967 1 0.192721 0.252249 0.936902\n969 1 0.252372 0.250601 0.875163\n970 1 0.304265 0.310673 0.872437\n971 1 0.311699 0.244831 0.929684\n972 1 0.25371 0.307015 0.93652\n1005 1 0.374329 0.373933 0.87166\n777 1 0.255451 0.0029715 0.751217\n973 1 0.379542 0.247051 0.865402\n944 1 
0.372039 0.188123 0.933311\n942 1 0.438157 0.188316 0.878309\n976 1 0.369805 0.306215 0.927206\n975 1 0.435896 0.247932 0.934522\n974 1 0.430306 0.313231 0.872697\n977 1 0.500552 0.258261 0.875202\n948 1 0.495686 0.18583 0.936325\n1677 1 0.376964 0.501822 0.622352\n1006 1 0.442277 0.430169 0.876585\n541 1 0.87402 0.00320159 0.500925\n980 1 0.498085 0.312131 0.935926\n979 1 0.549874 0.251815 0.943889\n952 1 0.623141 0.192658 0.932456\n946 1 0.556898 0.193284 0.876866\n984 1 0.622682 0.310873 0.938788\n978 1 0.568527 0.312663 0.878395\n981 1 0.629824 0.255298 0.876312\n983 1 0.685726 0.251108 0.93135\n956 1 0.749833 0.187422 0.935983\n987 1 0.814286 0.253739 0.939638\n986 1 0.81579 0.306843 0.869604\n954 1 0.815489 0.190044 0.872448\n982 1 0.687368 0.313409 0.880013\n988 1 0.749744 0.316299 0.940774\n985 1 0.747924 0.244245 0.875264\n950 1 0.684333 0.19149 0.870922\n1008 1 0.378461 0.442398 0.935341\n1007 1 0.434752 0.377068 0.941514\n657 1 0.498525 0.000420111 0.628454\n102 1 0.186605 0.437039 0.998272\n913 1 0.502438 0.00988831 0.878256\n1671 1 0.180283 0.502194 0.681583\n991 1 0.937575 0.259094 0.94277\n989 1 0.875646 0.253558 0.869825\n990 1 0.936424 0.313724 0.874137\n992 1 0.877289 0.313951 0.945716\n960 1 0.877933 0.186837 0.931309\n958 1 0.932094 0.186279 0.866889\n1693 1 0.870376 0.503636 0.625545\n1003 1 0.318752 0.374887 0.935746\n997 1 0.126992 0.372036 0.873004\n1000 1 0.13163 0.43291 0.931887\n995 1 0.0602818 0.379171 0.939456\n994 1 0.0604543 0.436445 0.870683\n597 1 0.626775 0.250646 0.498487\n999 1 0.189994 0.368892 0.932821\n105 1 0.257063 0.38057 0.997947\n998 1 0.190432 0.432998 0.860866\n1002 1 0.313888 0.433193 0.87213\n1004 1 0.250446 0.437341 0.925369\n1001 1 0.250745 0.375602 0.875508\n70 1 0.197665 0.315843 0.99896\n45 1 0.372374 0.122636 0.99893\n101 1 0.125578 0.368893 1.00315\n1683 1 0.567658 0.498748 0.687614\n1795 1 0.0584064 0.494218 0.814735\n1819 1 0.81048 0.493369 0.818873\n1793 1 -0.00389568 0.500841 0.743618\n927 1 0.940035 0.00788331 0.937208\n1801 1 0.248108 0.503756 0.747329\n594 1 0.56603 0.31016 0.501341\n98 1 0.0612153 0.437482 1.00026\n637 1 0.868929 0.374861 0.503704\n645 1 0.12729 0.000745204 0.625947\n42 1 0.310454 0.181541 0.997198\n30 1 0.938765 0.0644042 0.995463\n10 1 0.308879 0.0636343 0.992345\n61 1 0.872898 0.117212 0.997277\n1685 1 0.626208 0.503147 0.622283\n26 1 0.810788 0.0596717 0.996296\n1539 1 0.0700014 0.499702 0.557503\n29 1 0.876066 0.00520873 0.994696\n53 1 0.625244 0.126886 1.00059\n22 1 0.689645 0.0671419 0.99421\n106 1 0.315371 0.435447 0.996622\n513 1 -0.000512956 0.00225347 0.502535\n41 1 0.245371 0.126869 0.995411\n69 1 0.127357 0.246462 1.00088\n121 1 0.749297 0.37597 0.999665\n38 1 0.190142 0.190749 0.991935\n85 1 0.624245 0.250029 0.998823\n34 1 0.0660205 0.18824 1.00133\n1033 1 0.250709 0.494997 0.995286\n9 1 0.250829 -0.00163604 0.994306\n577 1 -0.00142726 0.250224 0.505039\n545 1 0.999627 0.121259 0.504943\n1545 1 0.249257 0.502978 0.506041\n89 1 0.751881 0.253761 0.996194\n533 1 0.626406 0.00314178 0.509533\n37 1 0.1308 0.126732 0.992478\n625 1 0.494409 0.378811 0.506289\n546 1 0.0612494 0.185066 0.502846\n618 1 0.312808 0.438408 0.505624\n562 1 0.571258 0.191485 0.505441\n634 1 0.806109 0.442629 0.504429\n14 1 0.436246 0.0602222 0.991378\n1549 1 0.382344 0.49643 0.502916\n581 1 0.128455 0.24241 0.503576\n522 1 0.299464 0.0621367 0.504889\n605 1 0.866638 0.244765 0.508318\n530 1 0.562344 0.0606799 0.505396\n1049 1 0.748165 0.498881 0.994296\n1032 1 0.120951 0.563378 0.0476376\n1059 1 0.0649706 0.632268 
0.0579308\n1154 1 0.0674369 0.57036 0.123988\n1189 1 0.128013 0.624838 0.119689\n1028 1 0.00150504 0.568431 0.0636123\n1055 1 0.933403 0.497455 0.0639099\n1051 1 0.808592 0.507409 0.0609862\n1036 1 0.245922 0.559319 0.051009\n1063 1 0.185781 0.625815 0.0572345\n1067 1 0.313211 0.620671 0.0634203\n1158 1 0.190277 0.560036 0.11324\n1162 1 0.310574 0.560039 0.126669\n1193 1 0.245098 0.62998 0.114077\n159 1 0.944292 0.999365 0.177577\n1040 1 0.371536 0.559543 0.0697809\n1071 1 0.437492 0.630411 0.0681242\n1166 1 0.43464 0.563705 0.130209\n1197 1 0.372658 0.628263 0.123897\n1201 1 0.503392 0.619142 0.126901\n259 1 0.067672 0.999619 0.322275\n1439 1 0.934198 0.501883 0.441714\n1044 1 0.498543 0.561886 0.0650171\n1048 1 0.627558 0.568135 0.0633922\n1075 1 0.566541 0.632657 0.0658062\n1170 1 0.564842 0.561244 0.125835\n1205 1 0.628852 0.627835 0.132509\n129 1 0.999277 1.0004 0.114102\n1073 1 0.503013 0.631566 0.00776866\n1159 1 0.180923 0.500501 0.175666\n1126 1 0.190155 0.940581 -0.00366215\n413 1 0.874531 1.00268 0.375328\n1052 1 0.746329 0.560225 0.0686458\n1079 1 0.693813 0.628381 0.0640824\n1083 1 0.813803 0.628039 0.0652456\n1174 1 0.690189 0.561045 0.133593\n1178 1 0.815694 0.560777 0.126324\n1209 1 0.756079 0.624059 0.128454\n1042 1 0.562645 0.562689 0.00306958\n1287 1 0.1855 0.502047 0.309908\n1185 1 0.00321305 0.625299 0.123337\n1056 1 0.879918 0.567759 0.0696988\n1087 1 0.938851 0.625237 0.0635129\n1182 1 0.938991 0.562971 0.127566\n1213 1 0.880435 0.632991 0.124443\n285 1 0.875486 1.00104 0.253839\n155 1 0.810326 0.999827 0.180428\n1064 1 0.121047 0.689289 0.0607107\n1091 1 0.0587718 0.751887 0.0606368\n1096 1 0.13015 0.812555 0.0589272\n1186 1 0.0633033 0.688458 0.128003\n1218 1 0.0717863 0.815132 0.123539\n1221 1 0.128387 0.753226 0.117782\n1217 1 0.999495 0.750574 0.120412\n1092 1 1.00191 0.818884 0.0651167\n1153 1 -0.000748919 0.500346 0.114576\n1068 1 0.249317 0.691319 0.0540212\n1095 1 0.194624 0.75329 0.0584079\n1099 1 0.314018 0.751532 0.0598816\n1100 1 0.254145 0.809978 0.0618181\n1190 1 0.190097 0.688593 0.117176\n1194 1 0.310078 0.685039 0.121151\n1222 1 0.197031 0.811917 0.126293\n1225 1 0.259602 0.749718 0.124344\n1226 1 0.313036 0.811573 0.117187\n1181 1 0.880045 0.505502 0.131665\n1077 1 0.617681 0.623583 -0.000352205\n1072 1 0.370755 0.686866 0.0592649\n1103 1 0.442261 0.745091 0.0741411\n1104 1 0.376346 0.808399 0.0615335\n1198 1 0.442379 0.681275 0.132874\n1229 1 0.377797 0.740804 0.128843\n1230 1 0.436243 0.809649 0.123348\n1233 1 0.506598 0.752892 0.127169\n1076 1 0.501202 0.68796 0.0652276\n1108 1 0.503099 0.810472 0.0525396\n283 1 0.808365 0.997298 0.310623\n1080 1 0.634323 0.692113 0.0697499\n1107 1 0.563142 0.750133 0.0704957\n1112 1 0.622853 0.818168 0.060014\n1202 1 0.561965 0.690933 0.131084\n1234 1 0.560311 0.817996 0.120578\n1237 1 0.627646 0.755317 0.125892\n1554 1 0.564339 0.56047 0.494915\n1303 1 0.688856 0.495812 0.315732\n1084 1 0.7469 0.694193 0.0618114\n1111 1 0.68764 0.758559 0.0703406\n1115 1 0.813603 0.75689 0.0534356\n1116 1 0.754 0.810972 0.0573806\n1206 1 0.689402 0.688525 0.135356\n1210 1 0.807761 0.697075 0.127079\n1238 1 0.688913 0.806849 0.139051\n1241 1 0.753569 0.757866 0.120592\n1242 1 0.819084 0.815024 0.131764\n1542 1 0.191942 0.563203 0.503361\n1594 1 0.809938 0.691282 0.501703\n1069 1 0.384857 0.625251 0.00123714\n1060 1 -0.00285304 0.684599 0.0701945\n1088 1 0.871596 0.689675 0.0636234\n1119 1 0.938154 0.746741 0.0611025\n1120 1 0.874931 0.814246 0.0609959\n1214 1 0.939131 0.687919 0.133265\n1245 1 0.873427 0.753012 0.132636\n1246 1 
0.937462 0.804851 0.131027\n151 1 0.686024 0.995688 0.185978\n1123 1 0.0717475 0.875474 0.0611777\n1128 1 0.130738 0.937837 0.0663098\n1250 1 0.0667363 0.939358 0.111785\n1253 1 0.13292 0.873541 0.123348\n1124 1 1.0001 0.935884 0.0465052\n1249 1 0.00144773 0.877397 0.126733\n1161 1 0.246811 0.498082 0.125085\n1066 1 0.315983 0.68402 -0.000247993\n1621 1 0.621476 0.75923 0.503779\n1307 1 0.821938 0.504375 0.314169\n1127 1 0.192561 0.875625 0.0586666\n1131 1 0.319814 0.872777 0.0581123\n1132 1 0.246905 0.93963 0.0628625\n1254 1 0.189022 0.945884 0.128335\n1257 1 0.251967 0.873842 0.119193\n1258 1 0.317688 0.93996 0.12084\n135 1 0.19177 0.996696 0.197442\n1558 1 0.690866 0.559231 0.498098\n1135 1 0.438401 0.870077 0.0572912\n1136 1 0.378434 0.929363 0.0519431\n1261 1 0.380052 0.882642 0.124031\n1262 1 0.438223 0.936777 0.122588\n1265 1 0.496594 0.871629 0.11939\n393 1 0.246062 0.996652 0.371812\n1508 1 0.995738 0.935461 0.436791\n1047 1 0.68307 0.50604 0.0633846\n1140 1 0.504648 0.93554 0.0535453\n1139 1 0.566049 0.877432 0.0603634\n1144 1 0.627908 0.938326 0.0610961\n1266 1 0.56164 0.937121 0.117416\n1269 1 0.631099 0.878974 0.119472\n1536 1 0.872702 0.936907 0.437824\n397 1 0.377339 0.998701 0.364159\n139 1 0.312868 0.991333 0.188096\n1150 1 0.929019 0.94304 -0.00316297\n1143 1 0.689685 0.874802 0.0534556\n1147 1 0.804918 0.873128 0.0657278\n1148 1 0.747246 0.933258 0.0645014\n1270 1 0.687309 0.938174 0.122302\n1273 1 0.75391 0.868848 0.129799\n1274 1 0.817907 0.931058 0.126689\n1411 1 0.0701582 0.506655 0.43174\n1617 1 0.503419 0.752268 0.498226\n1285 1 0.1242 0.504971 0.251402\n1167 1 0.443179 0.505192 0.192925\n1151 1 0.938172 0.866138 0.0579902\n1152 1 0.878895 0.939946 0.0626196\n1277 1 0.877403 0.874772 0.122368\n1278 1 0.941239 0.93341 0.113395\n1417 1 0.249026 0.496321 0.379278\n1165 1 0.375413 0.502616 0.132037\n1160 1 0.1275 0.567375 0.187563\n1187 1 0.0687259 0.627716 0.184853\n1282 1 0.0589703 0.564314 0.25399\n1288 1 0.125474 0.569128 0.305833\n1315 1 0.0628705 0.62279 0.315608\n1317 1 0.132232 0.621749 0.246225\n1156 1 0.99706 0.561652 0.190445\n1535 1 0.938381 0.873185 0.444698\n1534 1 0.93628 0.940031 0.376403\n1164 1 0.250158 0.561893 0.188668\n1191 1 0.195032 0.627863 0.179181\n1195 1 0.310686 0.620169 0.192085\n1286 1 0.188477 0.563858 0.253459\n1290 1 0.30587 0.566102 0.252711\n1292 1 0.253332 0.561577 0.317617\n1319 1 0.19016 0.63281 0.314665\n1321 1 0.244438 0.632114 0.253335\n1323 1 0.31568 0.631113 0.315094\n1533 1 0.878367 0.874554 0.376356\n1537 1 0.997358 0.504555 0.500234\n1653 1 0.632223 0.878928 0.496383\n1168 1 0.367849 0.560019 0.185335\n1199 1 0.429622 0.625623 0.189882\n1294 1 0.439096 0.570883 0.246194\n1296 1 0.383462 0.562224 0.311321\n1325 1 0.375585 0.625265 0.256546\n1327 1 0.441645 0.619537 0.310821\n1172 1 0.4996 0.564053 0.197188\n1297 1 0.503501 0.499674 0.252103\n147 1 0.554611 0.989228 0.182022\n1329 1 0.503767 0.62942 0.254541\n1300 1 0.509869 0.56236 0.315039\n1176 1 0.629217 0.56137 0.186713\n1203 1 0.569197 0.620738 0.183396\n1298 1 0.574169 0.569995 0.249656\n1304 1 0.625318 0.561055 0.315126\n1331 1 0.570185 0.624957 0.318629\n1333 1 0.629751 0.63087 0.252616\n1646 1 0.43284 0.943869 0.497465\n1180 1 0.750827 0.563217 0.188314\n1207 1 0.690983 0.625107 0.195845\n1211 1 0.814932 0.629597 0.190719\n1302 1 0.684747 0.565796 0.251957\n1306 1 0.820071 0.566955 0.250865\n1308 1 0.756069 0.565757 0.308239\n1335 1 0.694235 0.622515 0.313827\n1337 1 0.758579 0.629669 0.255927\n1339 1 0.81828 0.626572 0.315915\n1419 1 0.322425 0.504436 0.435945\n265 
1 0.253236 0.994182 0.252349\n1038 1 0.442567 0.557861 0.00415695\n1175 1 0.691056 0.500021 0.188705\n1284 1 0.00411891 0.56361 0.317737\n1313 1 0.00168739 0.629318 0.256824\n1184 1 0.885366 0.558945 0.195994\n1215 1 0.940145 0.624128 0.186944\n1310 1 0.944171 0.562509 0.256608\n1312 1 0.877081 0.570274 0.311367\n1341 1 0.876439 0.626847 0.252194\n1343 1 0.938291 0.626053 0.319299\n257 1 0.0139147 0.99767 0.256608\n1070 1 0.437556 0.690557 0.000289134\n1529 1 0.749941 0.879352 0.374555\n1192 1 0.135795 0.685409 0.175094\n1219 1 0.0707542 0.751504 0.183599\n1224 1 0.122884 0.815898 0.187725\n1314 1 0.0747162 0.68588 0.246186\n1320 1 0.125939 0.69198 0.313433\n1346 1 0.0622943 0.812246 0.251545\n1347 1 0.0591858 0.747257 0.317531\n1349 1 0.129156 0.758604 0.25271\n1352 1 0.11726 0.811345 0.31325\n1220 1 -0.00360691 0.813083 0.190301\n1345 1 0.00413475 0.745628 0.251484\n1196 1 0.257184 0.691127 0.184345\n1223 1 0.192376 0.749793 0.184873\n1227 1 0.313821 0.749672 0.192115\n1228 1 0.25456 0.806063 0.184569\n1318 1 0.187627 0.696581 0.250726\n1322 1 0.308079 0.680789 0.253358\n1324 1 0.251201 0.687737 0.318052\n1350 1 0.187795 0.81771 0.243995\n1351 1 0.191765 0.755664 0.310296\n1353 1 0.252414 0.763999 0.253536\n1354 1 0.318019 0.813455 0.247413\n1355 1 0.309432 0.750677 0.314693\n1356 1 0.248713 0.815787 0.322379\n1200 1 0.365417 0.686605 0.191036\n1231 1 0.442603 0.749614 0.193254\n1232 1 0.378574 0.808475 0.18773\n1326 1 0.438502 0.687182 0.256356\n1328 1 0.375806 0.685884 0.311514\n1357 1 0.374174 0.752818 0.248519\n1358 1 0.442585 0.810253 0.248772\n1359 1 0.441227 0.749492 0.3179\n1360 1 0.379178 0.811233 0.308717\n1332 1 0.502166 0.687711 0.311741\n1204 1 0.504983 0.688143 0.194379\n1236 1 0.501205 0.814941 0.187258\n1361 1 0.496163 0.747596 0.257443\n1364 1 0.499323 0.812043 0.317463\n1208 1 0.625149 0.695145 0.193208\n1235 1 0.560206 0.753072 0.194128\n1240 1 0.618241 0.81553 0.192687\n1330 1 0.569051 0.687044 0.250712\n1336 1 0.630386 0.686899 0.313843\n1362 1 0.561997 0.81178 0.252624\n1363 1 0.563841 0.750733 0.315171\n1365 1 0.629988 0.756207 0.256854\n1368 1 0.622025 0.819865 0.312294\n1212 1 0.747233 0.67992 0.193801\n1239 1 0.694221 0.743108 0.194724\n1243 1 0.81131 0.750096 0.197968\n1244 1 0.748857 0.812572 0.197131\n1334 1 0.688334 0.69252 0.251789\n1338 1 0.815447 0.693327 0.25336\n1340 1 0.760245 0.690717 0.312719\n1366 1 0.690381 0.81736 0.250409\n1367 1 0.688024 0.750722 0.312699\n1369 1 0.753891 0.75274 0.259722\n1370 1 0.815409 0.819853 0.25463\n1371 1 0.814585 0.753478 0.313112\n1372 1 0.754696 0.815751 0.321196\n1348 1 -0.00187593 0.809388 0.316969\n1188 1 0.00304103 0.680817 0.190592\n1316 1 -0.00401317 0.688408 0.321162\n1216 1 0.878052 0.693121 0.196493\n1247 1 0.948052 0.748889 0.190299\n1248 1 0.873521 0.812722 0.194594\n1342 1 0.938681 0.690997 0.253933\n1344 1 0.87924 0.689707 0.313986\n1373 1 0.875367 0.752365 0.25658\n1374 1 0.935913 0.810952 0.25287\n1375 1 0.936627 0.749273 0.310812\n1376 1 0.877327 0.809826 0.316846\n263 1 0.181527 0.992418 0.308819\n1251 1 0.0659151 0.872536 0.181405\n1256 1 0.129024 0.937732 0.181225\n1378 1 0.0665252 0.937938 0.256343\n1379 1 0.0576258 0.869558 0.312482\n1381 1 0.123829 0.872597 0.254216\n1384 1 0.122643 0.931996 0.3154\n1252 1 0.0132071 0.936776 0.196659\n1380 1 0.994004 0.93627 0.310249\n1526 1 0.688827 0.944373 0.372741\n1528 1 0.627509 0.940424 0.434097\n1255 1 0.195263 0.873489 0.188617\n1259 1 0.313213 0.872354 0.176517\n1260 1 0.254369 0.931478 0.188735\n1382 1 0.189705 0.930467 0.247489\n1383 1 0.181108 
0.870127 0.31392\n1385 1 0.250557 0.868915 0.252607\n1386 1 0.31196 0.92903 0.245733\n1387 1 0.30792 0.873024 0.312227\n1388 1 0.243368 0.932259 0.312251\n1527 1 0.694734 0.878759 0.436542\n1263 1 0.437308 0.874484 0.188305\n1264 1 0.375385 0.931413 0.18884\n1389 1 0.374425 0.871243 0.250464\n1390 1 0.431816 0.941921 0.249126\n1391 1 0.438312 0.884649 0.306296\n1392 1 0.364012 0.929719 0.309492\n1396 1 0.49773 0.943202 0.31365\n1268 1 0.493264 0.93776 0.189627\n1393 1 0.495225 0.877576 0.25145\n1530 1 0.81903 0.942696 0.375403\n395 1 0.312237 0.99916 0.430631\n1267 1 0.550532 0.87927 0.185485\n1272 1 0.623526 0.934422 0.1833\n1394 1 0.559267 0.940517 0.253453\n1395 1 0.556867 0.879406 0.314791\n1397 1 0.62402 0.881183 0.254043\n1400 1 0.620239 0.943083 0.316776\n1271 1 0.681415 0.875865 0.185078\n1275 1 0.813615 0.879306 0.191794\n1276 1 0.751754 0.939537 0.185884\n1398 1 0.683849 0.941334 0.257109\n1399 1 0.691194 0.877948 0.313595\n1401 1 0.747521 0.880196 0.244893\n1402 1 0.817527 0.940384 0.247599\n1403 1 0.816581 0.877395 0.317682\n1404 1 0.752298 0.936899 0.30952\n1081 1 0.750141 0.625008 -0.0020045\n273 1 0.49864 0.999816 0.249177\n1630 1 0.943108 0.806809 0.501802\n1377 1 -0.000212799 0.870546 0.250299\n1279 1 0.939547 0.880375 0.186956\n1280 1 0.883769 0.935654 0.184174\n1405 1 0.878475 0.878253 0.253945\n1406 1 0.932784 0.939308 0.251788\n1407 1 0.940527 0.869288 0.31259\n1408 1 0.880135 0.942973 0.312055\n1570 1 0.0655682 0.686868 0.502515\n1031 1 0.186604 0.500796 0.0593323\n1410 1 0.0666023 0.570297 0.37613\n1416 1 0.128336 0.574051 0.436969\n1443 1 0.064811 0.62713 0.444077\n1445 1 0.12814 0.625373 0.371603\n1441 1 0.00586849 0.624699 0.374749\n1531 1 0.80276 0.877193 0.438729\n1039 1 0.440049 0.496826 0.0636544\n1449 1 0.248229 0.627271 0.375734\n1451 1 0.313138 0.626251 0.437217\n1420 1 0.250122 0.561713 0.429225\n1447 1 0.195701 0.634096 0.44169\n1414 1 0.183999 0.559503 0.373874\n1418 1 0.315313 0.566006 0.372478\n403 1 0.562993 0.998672 0.443727\n1589 1 0.626404 0.631893 0.49826\n1637 1 0.123258 0.866531 0.500566\n1422 1 0.443835 0.56123 0.376266\n1455 1 0.442365 0.623031 0.439019\n1424 1 0.377282 0.570003 0.435442\n1453 1 0.37571 0.637276 0.374686\n1457 1 0.501903 0.615712 0.372048\n1645 1 0.377429 0.880575 0.499704\n1281 1 0.00410744 0.500158 0.255738\n1428 1 0.505825 0.56462 0.438463\n1461 1 0.631412 0.622601 0.37188\n1426 1 0.567223 0.561816 0.376071\n1432 1 0.634227 0.562468 0.431881\n1459 1 0.571378 0.62505 0.431001\n1522 1 0.558259 0.944066 0.374345\n1430 1 0.694686 0.562899 0.372753\n1463 1 0.690431 0.626941 0.44138\n1436 1 0.756028 0.559697 0.436273\n1434 1 0.81649 0.559064 0.373203\n1465 1 0.748809 0.635151 0.377623\n1467 1 0.811878 0.624091 0.441311\n1155 1 0.0649185 0.505063 0.185119\n269 1 0.372214 1.00004 0.249596\n1109 1 0.626412 0.75327 -0.00495624\n1532 1 0.745848 0.947938 0.43151\n1412 1 1.00361 0.568432 0.435125\n1525 1 0.630626 0.869476 0.374771\n1440 1 0.870749 0.563578 0.441938\n1469 1 0.87951 0.631073 0.382142\n1471 1 0.933671 0.628268 0.44004\n1438 1 0.935742 0.567388 0.378718\n1585 1 0.505458 0.623564 0.500925\n1474 1 0.0587188 0.812776 0.375533\n1480 1 0.12268 0.810831 0.435992\n1475 1 0.06246 0.746416 0.428824\n1442 1 0.0674876 0.686695 0.37443\n1477 1 0.128798 0.75185 0.373045\n1448 1 0.126222 0.69192 0.434565\n1473 1 1.00068 0.747321 0.380946\n1027 1 0.0626289 0.502404 0.0616754\n1452 1 0.262136 0.695176 0.442076\n1450 1 0.313917 0.686365 0.375168\n1446 1 0.184253 0.690693 0.37922\n1482 1 0.308543 0.814495 0.37512\n1484 1 0.254699 0.80882 
0.433152\n1478 1 0.190794 0.817645 0.381959\n1481 1 0.244024 0.748352 0.375265\n1483 1 0.31701 0.752967 0.439245\n1479 1 0.188589 0.747701 0.437028\n1606 1 0.18586 0.809318 0.500378\n1035 1 0.305738 0.497018 0.0531051\n1524 1 0.495432 0.943945 0.438352\n1456 1 0.373293 0.695012 0.438384\n1487 1 0.443028 0.754948 0.440473\n1485 1 0.376495 0.754294 0.372254\n1488 1 0.375488 0.81575 0.437207\n1454 1 0.439537 0.692015 0.376467\n1486 1 0.438001 0.817297 0.370469\n1492 1 0.496222 0.816338 0.435462\n1489 1 0.506059 0.751786 0.374993\n1460 1 0.50267 0.680603 0.441802\n1171 1 0.558987 0.500212 0.191491\n1517 1 0.37649 0.874554 0.376986\n1490 1 0.558673 0.818654 0.378926\n1458 1 0.562169 0.687744 0.373772\n1493 1 0.625841 0.755441 0.372739\n1491 1 0.561935 0.756621 0.432142\n1496 1 0.619776 0.817777 0.438573\n1464 1 0.630405 0.683958 0.435521\n385 1 -0.000757979 1.00231 0.377733\n267 1 0.308805 0.988867 0.311152\n1494 1 0.687295 0.80905 0.377004\n1466 1 0.811621 0.691123 0.381939\n1499 1 0.815185 0.759312 0.43761\n1495 1 0.685979 0.754148 0.450911\n1500 1 0.749407 0.816524 0.440885\n1497 1 0.749539 0.753108 0.373831\n1498 1 0.811587 0.818546 0.381234\n1468 1 0.747387 0.697264 0.441653\n1462 1 0.688181 0.701491 0.376077\n1519 1 0.437258 0.874965 0.437003\n1444 1 0.0061251 0.681494 0.434119\n1501 1 0.873891 0.747815 0.377141\n1504 1 0.877823 0.818183 0.441335\n1470 1 0.937973 0.68733 0.382205\n1476 1 1.00141 0.813672 0.435854\n1472 1 0.873804 0.686974 0.444058\n1503 1 0.933943 0.746406 0.433463\n1502 1 0.932971 0.809419 0.378517\n1569 1 0.999499 0.620748 0.499103\n149 1 0.624597 0.993946 0.126296\n391 1 0.187217 0.989922 0.441098\n1505 1 0.996243 0.877183 0.375199\n1037 1 0.377584 0.502054 0.00118493\n1597 1 0.874767 0.628517 0.503292\n1506 1 0.0644835 0.93414 0.376167\n1512 1 0.116208 0.934404 0.442187\n1507 1 0.0556118 0.873906 0.440177\n1509 1 0.12101 0.87639 0.379567\n261 1 0.118588 0.996375 0.247348\n1518 1 0.441769 0.945245 0.377124\n1523 1 0.563175 0.880455 0.437112\n1516 1 0.250503 0.930988 0.439049\n1514 1 0.308542 0.934406 0.378935\n1511 1 0.187633 0.874684 0.438174\n1510 1 0.177966 0.9363 0.369348\n1515 1 0.313849 0.874471 0.443383\n1513 1 0.24532 0.877689 0.377385\n1622 1 0.691633 0.818324 0.499075\n1521 1 0.501063 0.881324 0.377963\n1520 1 0.377975 0.942735 0.432954\n11 1 0.312364 1.00166 0.0573019\n1299 1 0.562937 0.50052 0.312702\n145 1 0.501808 1.00228 0.120474\n1423 1 0.446115 0.504827 0.436958\n141 1 0.372799 0.992427 0.12047\n1437 1 0.876366 0.503565 0.381944\n1311 1 0.939121 0.508832 0.321394\n1409 1 0.00539436 0.507406 0.374121\n1415 1 0.186307 0.501315 0.434684\n1289 1 0.248229 0.508182 0.252656\n1305 1 0.75789 0.504452 0.25652\n1034 1 0.311093 0.56871 0.000530442\n1090 1 0.0562786 0.808634 0.00225965\n1082 1 0.804844 0.687052 0.00375585\n1605 1 0.125207 0.748447 0.498006\n1618 1 0.5549 0.820072 0.48874\n1045 1 0.62205 0.505723 0.00898247\n1650 1 0.557009 0.933055 0.496391\n1601 1 0.00188178 0.750837 0.492189\n1566 1 0.934614 0.568411 0.495765\n1586 1 0.568691 0.688587 0.488545\n517 1 0.124543 1.0007 0.496191\n1654 1 0.688091 0.946444 0.501055\n1602 1 0.0547597 0.806975 0.495539\n1054 1 0.926686 0.568441 0.00103299\n25 1 0.752195 0.997639 0.00365253\n1130 1 0.308435 0.935058 -0.0016875\n1546 1 0.322215 0.563422 0.498041\n1046 1 0.695447 0.567179 0.00902163\n1590 1 0.686494 0.687051 0.499568\n1053 1 0.872416 0.498456 0.00589092\n1121 1 0.00469413 0.869822 -0.000139557\n1544 1 0.134595 0.569238 0.561782\n1571 1 0.0653558 0.627812 0.556138\n1666 1 0.0654595 0.568531 0.617954\n1701 1 
0.121365 0.632087 0.627946\n1541 1 0.126504 0.506927 0.496693\n1573 1 0.133637 0.62927 0.498697\n1933 1 0.387288 0.504604 0.880463\n1548 1 0.247103 0.559259 0.569617\n1575 1 0.187405 0.626837 0.566238\n1579 1 0.308504 0.615493 0.564551\n1670 1 0.189609 0.559653 0.622636\n1674 1 0.311727 0.559272 0.623704\n1705 1 0.251025 0.628325 0.630118\n647 1 0.18325 0.996435 0.686861\n1626 1 0.810957 0.816351 0.502222\n1665 1 0.00926728 0.507999 0.625911\n901 1 0.126276 0.999541 0.879954\n1552 1 0.375176 0.570315 0.572492\n1583 1 0.44226 0.631587 0.564427\n1678 1 0.440246 0.559668 0.6247\n1709 1 0.377756 0.62904 0.624051\n1713 1 0.496077 0.631386 0.627217\n903 1 0.189046 0.999235 0.939043\n1805 1 0.381373 0.50082 0.754621\n799 1 0.936661 0.998777 0.808618\n669 1 0.880671 0.996098 0.619248\n1556 1 0.497956 0.561635 0.566814\n1633 1 0.00165729 0.872699 0.498484\n1560 1 0.628434 0.562965 0.56125\n1587 1 0.562199 0.626175 0.556135\n1682 1 0.568669 0.563052 0.62143\n1717 1 0.627933 0.624096 0.630038\n1578 1 0.314677 0.687016 0.506842\n1561 1 0.75546 0.506742 0.501708\n1613 1 0.377715 0.752198 0.501866\n1661 1 0.883321 0.871055 0.504331\n1564 1 0.747707 0.560811 0.565063\n1591 1 0.68113 0.624825 0.56103\n1595 1 0.815909 0.620581 0.563376\n1686 1 0.688811 0.563548 0.622994\n1690 1 0.81913 0.567129 0.622369\n1721 1 0.745632 0.626151 0.620235\n1098 1 0.308244 0.808769 0.994975\n539 1 0.798006 1.00013 0.561349\n923 1 0.805489 1.00085 0.939869\n1697 1 0.000480246 0.630067 0.621093\n1540 1 1.00323 0.565004 0.563328\n1568 1 0.87166 0.557682 0.564693\n1599 1 0.939982 0.62307 0.568798\n1694 1 0.935362 0.55889 0.620879\n1725 1 0.875764 0.628476 0.626821\n1813 1 0.627186 0.501715 0.74905\n1550 1 0.440727 0.559215 0.50017\n905 1 0.258062 0.997516 0.870785\n1576 1 0.129092 0.686342 0.560445\n1603 1 0.0569363 0.75106 0.560693\n1608 1 0.122482 0.808733 0.56659\n1698 1 0.0590183 0.691363 0.619752\n1730 1 0.0662467 0.816211 0.627377\n1733 1 0.121237 0.749015 0.625736\n1799 1 0.184743 0.494931 0.810931\n1572 1 0.000537899 0.688463 0.560035\n1803 1 0.310359 0.50295 0.809145\n653 1 0.381498 0.994077 0.627298\n1580 1 0.252479 0.688537 0.558361\n1607 1 0.188651 0.752299 0.566073\n1611 1 0.313059 0.751936 0.566636\n1612 1 0.248621 0.806624 0.563656\n1702 1 0.188585 0.695216 0.62535\n1706 1 0.316207 0.6878 0.618356\n1734 1 0.194188 0.810965 0.627334\n1737 1 0.254169 0.745282 0.633979\n1738 1 0.310999 0.809992 0.624547\n1582 1 0.445817 0.688036 0.499002\n781 1 0.378956 1.00252 0.75415\n911 1 0.441905 0.999575 0.935772\n1584 1 0.377276 0.689114 0.56222\n1615 1 0.446529 0.748372 0.563817\n1616 1 0.375356 0.814913 0.567521\n1710 1 0.439215 0.691046 0.624695\n1741 1 0.371678 0.749805 0.629344\n1742 1 0.440015 0.816034 0.624038\n1745 1 0.502273 0.746472 0.623879\n1588 1 0.504066 0.688278 0.565089\n1620 1 0.494772 0.819099 0.562432\n1638 1 0.182254 0.933713 0.503459\n1592 1 0.620989 0.689056 0.555935\n1619 1 0.554248 0.758376 0.56151\n1624 1 0.61754 0.819977 0.551087\n1714 1 0.568869 0.686118 0.622198\n1746 1 0.561815 0.816106 0.625856\n1749 1 0.622243 0.749609 0.626297\n649 1 0.245991 0.993707 0.624048\n1931 1 0.313058 0.500055 0.934414\n1596 1 0.744547 0.680224 0.558071\n1623 1 0.684372 0.755681 0.561584\n1627 1 0.809821 0.739061 0.566815\n1628 1 0.7543 0.814753 0.561122\n1718 1 0.686372 0.681585 0.620565\n1722 1 0.805478 0.686438 0.630644\n1750 1 0.680369 0.809829 0.627638\n1753 1 0.744436 0.748813 0.628867\n1754 1 0.815246 0.804648 0.618057\n1577 1 0.252976 0.623366 0.4986\n643 1 0.0648344 0.996401 0.693271\n1137 1 0.506856 0.876371 
0.992337\n1729 1 0.993279 0.757839 0.622545\n1604 1 1.00331 0.816322 0.559266\n1600 1 0.874464 0.686879 0.566361\n1631 1 0.945265 0.748485 0.557755\n1632 1 0.871746 0.808613 0.557041\n1726 1 0.93882 0.689985 0.617674\n1757 1 0.878394 0.747418 0.619889\n1758 1 0.929603 0.816342 0.626463\n651 1 0.309216 0.993704 0.688839\n1949 1 0.876246 0.50346 0.871982\n1679 1 0.431753 0.49985 0.687012\n1635 1 0.0613523 0.878241 0.557866\n1640 1 0.127694 0.942677 0.563853\n1762 1 0.0691958 0.943321 0.623722\n1765 1 0.123615 0.874102 0.625554\n523 1 0.308149 0.99379 0.568618\n1636 1 1.00024 0.946139 0.560909\n897 1 1.00047 0.99903 0.877353\n2048 1 0.874113 0.94715 0.935122\n1639 1 0.186607 0.86371 0.559335\n1643 1 0.315074 0.880613 0.567987\n1644 1 0.243553 0.939421 0.566305\n1766 1 0.190878 0.935226 0.628425\n1769 1 0.25338 0.873827 0.630201\n1770 1 0.318433 0.935985 0.627313\n1078 1 0.677368 0.685297 1.00265\n1093 1 0.11568 0.753331 0.996745\n779 1 0.315983 1.00122 0.812088\n1647 1 0.436906 0.880906 0.565909\n1648 1 0.374608 0.945807 0.55477\n1773 1 0.375289 0.876489 0.629204\n1774 1 0.444596 0.940148 0.625842\n1777 1 0.499445 0.88258 0.623808\n1652 1 0.497564 0.942892 0.560466\n2047 1 0.937289 0.880324 0.940669\n1141 1 0.63021 0.874495 0.996767\n1117 1 0.867715 0.746845 0.996943\n1651 1 0.559837 0.88034 0.555194\n1656 1 0.624532 0.940501 0.564853\n1778 1 0.5555 0.942595 0.622088\n1781 1 0.620625 0.879597 0.634399\n2046 1 0.934526 0.935138 0.874807\n1655 1 0.680939 0.877366 0.56367\n1659 1 0.815459 0.878501 0.564159\n1660 1 0.750678 0.932999 0.563771\n1782 1 0.689167 0.941046 0.628097\n1785 1 0.755416 0.879153 0.625721\n1786 1 0.815941 0.943609 0.622307\n2045 1 0.872501 0.87916 0.866844\n1567 1 0.935994 0.502379 0.553247\n769 1 0.00064214 0.997427 0.752502\n1657 1 0.753019 0.877445 0.503322\n1142 1 0.691185 0.940949 0.994408\n1761 1 0.00148161 0.873909 0.617933\n1663 1 0.937373 0.869684 0.565135\n1664 1 0.87636 0.932497 0.56191\n1789 1 0.873148 0.872322 0.62183\n1790 1 0.938627 0.939844 0.618641\n1672 1 0.129412 0.564426 0.681472\n1699 1 0.0583857 0.630949 0.68279\n1794 1 0.0655101 0.563605 0.74357\n1800 1 0.12131 0.567384 0.801471\n1827 1 0.0586849 0.628708 0.805245\n1829 1 0.121744 0.631882 0.747005\n1825 1 1.00044 0.626629 0.74745\n1796 1 1.00356 0.55788 0.801698\n1609 1 0.255656 0.75981 0.499263\n917 1 0.627281 1.00463 0.877448\n1547 1 0.319156 0.507935 0.556566\n2039 1 0.687361 0.874235 0.928593\n1676 1 0.253742 0.562665 0.695629\n1703 1 0.18598 0.635005 0.678842\n1707 1 0.315925 0.620421 0.685645\n1798 1 0.187726 0.571155 0.746218\n1802 1 0.316876 0.557871 0.747436\n1804 1 0.253621 0.563982 0.808222\n1831 1 0.184694 0.626829 0.810506\n1833 1 0.25109 0.62011 0.750592\n1835 1 0.312365 0.624343 0.813096\n2038 1 0.68578 0.937491 0.874621\n2044 1 0.748407 0.942282 0.936725\n1680 1 0.371824 0.562253 0.67853\n1711 1 0.435516 0.617409 0.686349\n1806 1 0.434445 0.564144 0.75727\n1808 1 0.374971 0.565293 0.816768\n1837 1 0.378434 0.627001 0.750492\n1839 1 0.438677 0.630823 0.813397\n1684 1 0.495188 0.562706 0.686756\n1841 1 0.492888 0.628396 0.748862\n1812 1 0.496025 0.563138 0.809108\n2042 1 0.810936 0.940281 0.871543\n1688 1 0.633375 0.557063 0.688188\n1715 1 0.563032 0.626115 0.683883\n1810 1 0.557162 0.563374 0.740774\n1816 1 0.621782 0.558007 0.810863\n1843 1 0.56621 0.611837 0.808336\n1845 1 0.626788 0.620298 0.74666\n2043 1 0.817903 0.881651 0.929149\n1692 1 0.758054 0.561343 0.684612\n1719 1 0.69089 0.617705 0.69034\n1723 1 0.813386 0.624336 0.687665\n1814 1 0.696141 0.561502 0.749991\n1818 1 0.814839 
0.561489 0.74824\n1820 1 0.756312 0.557813 0.814648\n1847 1 0.687099 0.617859 0.808668\n1849 1 0.752704 0.617423 0.749264\n1851 1 0.812958 0.621644 0.812031\n2041 1 0.749042 0.875481 0.86944\n1681 1 0.504179 0.501048 0.632517\n1146 1 0.811498 0.933513 0.999237\n1668 1 0.999262 0.564372 0.685922\n1696 1 0.871884 0.559029 0.687866\n1727 1 0.937579 0.618379 0.686094\n1822 1 0.931731 0.556757 0.746397\n1824 1 0.873486 0.562471 0.81119\n1853 1 0.870482 0.628017 0.748362\n1855 1 0.934744 0.623793 0.806472\n1689 1 0.746919 0.497827 0.630395\n1704 1 0.119418 0.696716 0.688245\n1731 1 0.0568617 0.75246 0.688149\n1736 1 0.131401 0.812745 0.684024\n1826 1 0.0558298 0.694915 0.742189\n1832 1 0.130914 0.686142 0.811432\n1858 1 0.052344 0.811487 0.746053\n1859 1 0.0576264 0.748587 0.815192\n1861 1 0.118697 0.756607 0.745298\n1864 1 0.12337 0.811862 0.814629\n1828 1 0.99261 0.68678 0.809322\n1732 1 0.988392 0.820025 0.679782\n1708 1 0.257648 0.685049 0.693454\n1735 1 0.182701 0.748585 0.684595\n1739 1 0.312846 0.752334 0.688961\n1740 1 0.25211 0.808388 0.692787\n1830 1 0.184828 0.691558 0.743008\n1834 1 0.314999 0.689019 0.754194\n1836 1 0.245994 0.685496 0.804279\n1862 1 0.183929 0.813728 0.748288\n1863 1 0.190508 0.752724 0.81294\n1865 1 0.25003 0.751789 0.74943\n1866 1 0.316892 0.815146 0.756527\n1867 1 0.311636 0.749389 0.806466\n1868 1 0.245457 0.815687 0.813534\n1712 1 0.383081 0.687386 0.681568\n1743 1 0.437854 0.749668 0.685523\n1744 1 0.382377 0.814357 0.685583\n1838 1 0.434368 0.685247 0.749891\n1840 1 0.37459 0.685533 0.814932\n1869 1 0.372075 0.750049 0.741441\n1870 1 0.438686 0.811466 0.746235\n1871 1 0.436232 0.742897 0.813529\n1872 1 0.376594 0.801017 0.813769\n1873 1 0.499425 0.752072 0.755127\n1748 1 0.496463 0.804747 0.688338\n1876 1 0.500672 0.813664 0.81345\n1716 1 0.507547 0.693799 0.691867\n1844 1 0.49871 0.691047 0.817627\n1720 1 0.621726 0.687452 0.695913\n1747 1 0.564095 0.75921 0.692417\n1752 1 0.616316 0.819644 0.690759\n1842 1 0.557103 0.680396 0.758476\n1848 1 0.62819 0.688847 0.812968\n1874 1 0.568067 0.816846 0.756587\n1875 1 0.565397 0.752244 0.812997\n1877 1 0.62985 0.751454 0.752682\n1880 1 0.625102 0.811408 0.81376\n1724 1 0.745072 0.683725 0.688449\n1751 1 0.684143 0.742292 0.687761\n1755 1 0.811839 0.75369 0.682312\n1756 1 0.747555 0.813375 0.684349\n1846 1 0.684725 0.68511 0.750883\n1850 1 0.811153 0.689568 0.752303\n1852 1 0.752961 0.686788 0.81341\n1878 1 0.682465 0.815805 0.745719\n1879 1 0.691231 0.747327 0.810531\n1881 1 0.748606 0.742871 0.748111\n1882 1 0.806237 0.817215 0.742852\n1883 1 0.815464 0.750134 0.81085\n1884 1 0.749976 0.808512 0.803583\n1860 1 0.994163 0.819791 0.812464\n1857 1 0.993006 0.755407 0.752128\n1700 1 0.994231 0.685956 0.693134\n1728 1 0.869069 0.689254 0.685107\n1759 1 0.936158 0.747463 0.690786\n1760 1 0.866139 0.81751 0.68304\n1854 1 0.932916 0.685628 0.750689\n1856 1 0.87686 0.69163 0.813633\n1885 1 0.875083 0.7612 0.750198\n1886 1 0.936952 0.818359 0.743556\n1887 1 0.936241 0.756159 0.810992\n1888 1 0.867092 0.816742 0.807666\n1929 1 0.250979 0.504651 0.869361\n1763 1 0.055176 0.874759 0.687691\n1768 1 0.123841 0.940374 0.689319\n1890 1 0.0571391 0.935873 0.748182\n1891 1 0.0593564 0.871684 0.810103\n1893 1 0.123273 0.878038 0.753097\n1896 1 0.119174 0.945956 0.806688\n1892 1 0.994878 0.93612 0.815163\n1764 1 0.996307 0.936857 0.687122\n1889 1 0.994145 0.875141 0.751926\n1133 1 0.379043 0.871081 0.997867\n1669 1 0.119422 0.503251 0.617285\n1030 1 0.187436 0.554807 0.994522\n1106 1 0.568677 0.819846 0.993445\n1767 1 0.189537 0.872611 
0.687092\n1771 1 0.318669 0.870282 0.689856\n1772 1 0.25147 0.935473 0.695051\n1894 1 0.18209 0.939818 0.750219\n1895 1 0.186558 0.878414 0.805333\n1897 1 0.24949 0.868537 0.754193\n1898 1 0.319845 0.934604 0.749548\n1899 1 0.307372 0.876692 0.814482\n1900 1 0.248482 0.937432 0.803743\n1026 1 0.0498763 0.560868 0.989944\n1775 1 0.445673 0.882464 0.686736\n1776 1 0.383011 0.937578 0.693698\n1901 1 0.377959 0.867432 0.755212\n1902 1 0.444128 0.937075 0.75775\n1903 1 0.436134 0.863543 0.812572\n1904 1 0.37606 0.93781 0.813522\n1780 1 0.501805 0.942462 0.69404\n1908 1 0.504599 0.953919 0.811764\n1905 1 0.495901 0.875342 0.752625\n2013 1 0.871575 0.750996 0.869243\n2015 1 0.938295 0.750569 0.930005\n2036 1 0.500873 0.941637 0.936822\n1779 1 0.560746 0.883199 0.684418\n1784 1 0.619919 0.943817 0.691852\n1906 1 0.562492 0.940205 0.757501\n1907 1 0.556995 0.879183 0.809979\n1909 1 0.620985 0.881011 0.748226\n1912 1 0.626736 0.937581 0.80983\n2012 1 0.755414 0.810129 0.932544\n2027 1 0.31677 0.874288 0.94321\n1574 1 0.190861 0.698689 0.505638\n1980 1 0.756715 0.683542 0.935936\n521 1 0.251412 0.991386 0.498102\n2014 1 0.937455 0.814282 0.870698\n1783 1 0.687522 0.876578 0.685128\n1787 1 0.812129 0.88051 0.684211\n1788 1 0.757001 0.937092 0.679107\n1910 1 0.687824 0.941319 0.747009\n1911 1 0.691283 0.877312 0.813852\n1913 1 0.751787 0.877152 0.743678\n1914 1 0.812314 0.938929 0.741541\n1915 1 0.808454 0.881021 0.808867\n1916 1 0.743313 0.93862 0.812318\n2016 1 0.872644 0.812189 0.934236\n1791 1 0.939027 0.881666 0.678496\n1792 1 0.880028 0.933814 0.681202\n1917 1 0.873803 0.880719 0.749202\n1918 1 0.937068 0.937998 0.744971\n1919 1 0.93156 0.87882 0.815387\n1920 1 0.874084 0.938237 0.810412\n2040 1 0.628864 0.936304 0.932222\n1984 1 0.872812 0.680981 0.936847\n2011 1 0.813746 0.746798 0.940182\n1922 1 0.0596914 0.567052 0.863176\n1928 1 0.12345 0.558921 0.936939\n1955 1 0.0543501 0.620615 0.92521\n1957 1 0.120232 0.626082 0.868301\n1924 1 0.991624 0.563645 0.933558\n1058 1 0.0531478 0.696823 0.999081\n2006 1 0.693256 0.806072 0.870316\n2021 1 0.125754 0.876682 0.87861\n1930 1 0.315599 0.564898 0.876059\n1926 1 0.178129 0.560378 0.870224\n1963 1 0.31093 0.625389 0.934339\n1961 1 0.252212 0.62314 0.877507\n1932 1 0.24973 0.566317 0.937639\n1959 1 0.189568 0.628697 0.935868\n1667 1 0.0658348 0.501423 0.686048\n2034 1 0.567865 0.94188 0.869721\n2018 1 0.054933 0.939984 0.873809\n1061 1 0.114945 0.62805 0.991198\n2035 1 0.560531 0.8851 0.930813\n1965 1 0.375025 0.62284 0.878758\n1934 1 0.448352 0.561512 0.881063\n1936 1 0.378419 0.561888 0.942599\n1967 1 0.437428 0.620888 0.942056\n2019 1 0.0623548 0.876037 0.938692\n1809 1 0.493483 0.503569 0.748572\n2024 1 0.11878 0.93914 0.932123\n641 1 0.00205385 0.997031 0.629643\n1940 1 0.504143 0.560464 0.944607\n1969 1 0.49757 0.618526 0.875018\n1938 1 0.564034 0.566866 0.879803\n1944 1 0.62119 0.563956 0.940969\n1971 1 0.561101 0.619081 0.944221\n1973 1 0.625883 0.627473 0.873558\n1145 1 0.746894 0.882261 0.993058\n1634 1 0.0595531 0.937503 0.498997\n1065 1 0.243766 0.634011 0.997355\n1942 1 0.687802 0.564146 0.87447\n1946 1 0.813531 0.562692 0.876643\n1977 1 0.747678 0.619313 0.872274\n1979 1 0.813909 0.618713 0.936599\n1948 1 0.75067 0.564326 0.936889\n1975 1 0.681626 0.623804 0.940907\n2028 1 0.253318 0.932225 0.937943\n2023 1 0.18633 0.876244 0.943373\n1125 1 0.123 0.86594 0.991615\n1953 1 0.993451 0.620999 0.864374\n1950 1 0.939383 0.558738 0.869746\n1981 1 0.872477 0.621842 0.873212\n1983 1 0.931023 0.621471 0.935096\n1952 1 0.870641 0.563297 
0.934851\n2037 1 0.621855 0.875133 0.873139\n2030 1 0.437579 0.942046 0.870052\n2026 1 0.31863 0.929724 0.880056\n2020 1 0.00163609 0.938695 0.942029\n1988 1 1.00533 0.811992 0.931842\n1986 1 0.0560025 0.808891 0.872239\n1987 1 0.0550637 0.74821 0.934576\n1954 1 0.0622545 0.681499 0.866087\n1992 1 0.119199 0.808877 0.930461\n1989 1 0.124824 0.746583 0.869264\n1960 1 0.12189 0.683943 0.93183\n1985 1 0.993652 0.745145 0.868813\n1956 1 1.00209 0.682623 0.925889\n2025 1 0.249024 0.873286 0.876219\n1995 1 0.314969 0.747686 0.936454\n1996 1 0.250518 0.810218 0.930884\n1991 1 0.180648 0.748642 0.931033\n1958 1 0.190119 0.681381 0.874984\n1964 1 0.249652 0.685443 0.935843\n1993 1 0.257369 0.749553 0.865466\n1962 1 0.314812 0.690228 0.877041\n1990 1 0.186638 0.817722 0.870756\n1994 1 0.309792 0.814576 0.87294\n2032 1 0.383818 0.937826 0.936434\n1982 1 0.935144 0.680511 0.871909\n2029 1 0.371466 0.869577 0.864497\n1999 1 0.437377 0.749241 0.942543\n1998 1 0.436173 0.801838 0.873971\n1966 1 0.436665 0.687257 0.879155\n1968 1 0.377098 0.682305 0.939302\n1997 1 0.374944 0.749726 0.877028\n2000 1 0.368775 0.810003 0.931387\n1972 1 0.499853 0.682281 0.937261\n2031 1 0.440037 0.870022 0.9305\n1113 1 0.745355 0.749676 0.998809\n2004 1 0.504905 0.81664 0.929621\n2001 1 0.502078 0.749337 0.87424\n2005 1 0.627006 0.748564 0.873325\n2002 1 0.562102 0.812034 0.879427\n1970 1 0.561494 0.686876 0.868087\n1976 1 0.625573 0.68187 0.934247\n2003 1 0.566551 0.743973 0.932396\n2008 1 0.633488 0.814242 0.936699\n2033 1 0.493058 0.880755 0.868123\n2017 1 0.996191 0.878787 0.875634\n1074 1 0.566316 0.694526 1.00157\n2022 1 0.191428 0.938228 0.871475\n2010 1 0.804611 0.807074 0.86528\n2007 1 0.680102 0.74731 0.935135\n2009 1 0.753912 0.74509 0.874446\n1978 1 0.816786 0.681976 0.869183\n1974 1 0.690609 0.685261 0.868542\n1129 1 0.251497 0.873221 0.994404\n909 1 0.377137 1.00006 0.879002\n1094 1 0.191946 0.80251 0.990798\n1555 1 0.565859 0.500925 0.561863\n1057 1 -0.00309626 0.629372 0.997874\n1642 1 0.314314 0.942804 0.495962\n1062 1 0.178184 0.689267 0.995083\n1658 1 0.815447 0.940968 0.502516\n793 1 0.743105 1.00288 0.748977\n671 1 0.941433 0.995691 0.686931\n1089 1 0.990399 0.746802 0.995184\n1923 1 0.05581 0.498101 0.930794\n1097 1 0.250951 0.744118 0.991543\n1797 1 0.123376 0.501534 0.752231\n1085 1 0.873127 0.629855 1.00244\n1101 1 0.371896 0.750454 1.00422\n667 1 0.813528 1.00075 0.685281\n527 1 0.437471 1.00358 0.564627\n531 1 0.56325 0.9973 0.564851\n1105 1 0.49917 0.749498 0.995392\n1114 1 0.81342 0.822531 0.996507\n1102 1 0.439032 0.814091 0.994953\n1110 1 0.699646 0.815527 0.990578\n1581 1 0.381701 0.627057 0.504204\n13 1 0.372918 0.998796 0.999534\n1050 1 0.816684 0.565566 1.00689\n1807 1 0.439654 0.499143 0.821484\n907 1 0.317687 0.99607 0.93502\n1943 1 0.682512 0.504109 0.937564\n1811 1 0.557465 0.505707 0.809291\n1641 1 0.253317 0.869057 0.501578\n775 1 0.184156 0.999021 0.810872\n1538 1 0.06898 0.560485 0.501483\n1134 1 0.439629 0.930118 0.989881\n1662 1 0.937476 0.934607 0.502692\n1138 1 0.568217 0.93067 0.995241\n1086 1 0.933867 0.68585 0.999331\n1122 1 0.067583 0.939773 0.99392\n1562 1 0.81253 0.563951 0.509341\n1625 1 0.75164 0.75589 0.505127\n1598 1 0.940507 0.685858 0.500397\n1629 1 0.873073 0.749917 0.501831\n1118 1 0.930604 0.806269 0.992672\n1593 1 0.7484 0.618693 0.501168\n1149 1 0.87778 0.878256 0.996482\n1649 1 0.492737 0.880062 0.504117\n1610 1 0.315196 0.822664 0.510869\n1614 1 0.432621 0.813445 0.50591\n"
] | true |
99,509 |
9d8287ae3d75a6864077828ab68167a66ea343fd
|
"""A class for stashing data (and possibly filtering it, in an online way)."""
from vessel import Vessel
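# NOTE: `Vessel` is an external persistence helper (assumed here to expose
# attribute storage plus a .save() method); it is only exercised when
# save_data=True.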
from datetime import datetime
import numpy as np
from collections import deque


class Stash(object):
"""Store data and filter it."""
def __init__(
self,
nb_taps: int = 5,
demand_uniqueness: bool = True,
do_filter=True,
save_data=False,
):
self.do_filter = do_filter
self.save_data = save_data
self.demand_uniqueness = demand_uniqueness
self.M = M = nb_taps
self.p = p = int((M - 1) / 2)
self.q = p + 1
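        # For odd M, p + q = M: the window spans q samples behind and p
        # samples ahead of the point being filtered (a centered average).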
# These vectors hold the time/values being added to the stash.
self.x = deque([], maxlen=1000)
        self.t = deque([], maxlen=1000)

        # These variables are the filtered version of t/x; do not sample
        # from these vectors directly.
self.t_ = deque([], maxlen=1000)
self.x_ = deque([], maxlen=1000)
        self.x_prev = 0

        # These variables are the filtered version from which we sample. We
        # have two versions because, depending on how quickly we're sampling
        # from the object, we may exhaust the data needed for the moving
        # average filter.
self.t_filtered = deque([], maxlen=1000)
self.x_filtered = deque([], maxlen=1000)
if self.save_data:
datestring = datetime.now().strftime("%Y.%m.%d.%H.%M")
self.store = Vessel(f"data/{datestring}.dat")
self.store.t = []
            self.store.x = []

    def add(self, t, x):
"""Add new point."""
if self.demand_uniqueness:
# Cannot add two successive identical values.
if len(self.x) > 0:
if self.x[-1] != x:
self.t.append(t)
self.x.append(x)
self.save_to_store(t, x)
else:
self.t.append(t)
self.x.append(x)
self.save_to_store(t, x)
else:
self.t.append(t)
self.x.append(x)
self.save_to_store(t, x)
if len(self.x) >= self.M and self.do_filter:
            self.filter()

    def save_to_store(self, t, x):
if self.save_data:
self.store.t.append(t)
self.store.x.append(x)
if np.mod(len(self.store.t), 1000) == 0:
# Save every 1000 samples.
                self.store.save()

    def filter(self):
"""Super efficient moving average filter."""
M, p, q = self.M, self.p, self.q
x = self.x
idx = len(self.x) - (p + 1)
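        # O(1) recursive update of the centered M-point moving average:
        #   y[n] = y[n-1] + (x[n+p] - x[n-q]) / M
        # i.e. add the newest sample entering the window, drop the oldest.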
x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M
self.t_.append(self.t[idx])
self.t_filtered.append(self.t[idx])
self.x_.append(x_)
self.x_filtered.append(x_)
        self.x_prev = x_

    @property
def sample(self):
"""Return first observed pair (t, x), still in queue."""
if self.do_filter:
if len(self.t_filtered) > 0:
yield self.t_filtered.popleft(), self.x_filtered.popleft()
else:
yield None, None
else: # let's not filter
if len(self.t) > 0:
yield self.t.popleft(), self.x.popleft()
else:
                yield None, None


if __name__ == "__main__":
    import pylab as plt

    plt.ion()
    plt.close("all")

    # Create a noisy sinusoidal signal.
    x = np.sin(2 * np.pi * t / 3) + 0.05 * np.random.randn(1000) + 15

    # Estimate number of taps required for specified cutoff frequency.
# See (https://goo.gl/yCySp4) for more details.
fs = 100 # sampling rate
fc = 5 # cutoff frequency
Fco = fc / fs # normalized cutoff frequency
alpha = 0.196202
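    # Known approximation for an M-tap moving average: its -3 dB cutoff
    # satisfies N ~= sqrt(0.196202 + Fco^2) / Fco (hence the magic constant).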
    N = int(np.ceil(np.sqrt(alpha + Fco ** 2) / Fco))

    # Plot the example data
plt.figure()
    plt.plot(t, x)

    # Create a data stash!
    pzt = Stash(N)

    # Add a bunch of samples to the stash.
for t_, x_ in zip(t, x):
        pzt.add(t_, x_)

    # Now plot the resulting filtered data.
t_, x_ = pzt.t_filtered, pzt.x_filtered
    plt.plot(t_, x_)

    # Note also that you can sample from the object because it's a generator.
t0, x0 = next(pzt.sample)
t1, x1 = next(pzt.sample)
# ... and so on
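    # For example, a minimal sketch (assumption: we simply drain whatever is
    # still queued; `sample` yields (None, None) once the queue is empty):
    while True:
        t_i, x_i = next(pzt.sample)
        if t_i is None:
            break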
|
[
"\"\"\"A class for stashing data (and possibly filtering it, in an online way).\"\"\"\nfrom vessel import Vessel\n\nfrom datetime import datetime\nimport numpy as np\nfrom collections import deque\n\n\nclass Stash(object):\n \"\"\"Store data and filter it.\"\"\"\n\n def __init__(\n self,\n nb_taps: int = 5,\n demand_uniqueness: bool = True,\n do_filter=True,\n save_data=False,\n ):\n self.do_filter = do_filter\n self.save_data = save_data\n self.demand_uniqueness = demand_uniqueness\n self.M = M = nb_taps\n self.p = p = int((M - 1) / 2)\n self.q = p + 1\n\n # These vectors hold the time/values being added to the stash.\n self.x = deque([], maxlen=1000)\n self.t = deque([], maxlen=1000)\n\n # These variables are the filtered version of t/x; cannot sample from these vectors...\n self.t_ = deque([], maxlen=1000)\n self.x_ = deque([], maxlen=1000)\n self.x_prev = 0\n\n # These variables are the filtered version from which we sample. We\n # have two versions because, depending on how quickly we're sampling\n # from the the object, we may exhaust the data needed for the moving\n # average filter.\n self.t_filtered = deque([], maxlen=1000)\n self.x_filtered = deque([], maxlen=1000)\n if self.save_data:\n datestring = datetime.now().strftime(\"%Y.%m.%d.%H.%M\")\n self.store = Vessel(f\"data/{datestring}.dat\")\n self.store.t = []\n self.store.x = []\n\n def add(self, t, x):\n \"\"\"Add new point.\"\"\"\n if self.demand_uniqueness:\n # Cannot add two successive identical values.\n if len(self.x) > 0:\n if self.x[-1] != x:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n if len(self.x) >= self.M and self.do_filter:\n self.filter()\n\n def save_to_store(self, t, x):\n if self.save_data:\n self.store.t.append(t)\n self.store.x.append(x)\n if np.mod(len(self.store.t), 1000) == 0:\n # Save every 1000 samples.\n self.store.save()\n\n def filter(self):\n \"\"\"Super efficient moving average filter.\"\"\"\n M, p, q = self.M, self.p, self.q\n x = self.x\n idx = len(self.x) - (p + 1)\n x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M\n self.t_.append(self.t[idx])\n self.t_filtered.append(self.t[idx])\n self.x_.append(x_)\n self.x_filtered.append(x_)\n self.x_prev = x_\n\n @property\n def sample(self):\n \"\"\"Return first observed pair (t, x), still in queue.\"\"\"\n if self.do_filter:\n if len(self.t_filtered) > 0:\n yield self.t_filtered.popleft(), self.x_filtered.popleft()\n else:\n yield None, None\n else: # let's not filter\n if len(self.t) > 0:\n yield self.t.popleft(), self.x.popleft()\n else:\n yield None, None\n\n\nif __name__ == \"__main__\":\n import pylab as plt\n\n plt.ion()\n plt.close(\"all\")\n\n # Create a noisy sinusoidal signal.\n t = np.linspace(0, 10, 1000)\n x = np.sin(2 * np.pi * t / 3) + 0.05 * np.random.randn(1000) + 15\n\n # Estimate number of taps required for specified cutoff frequency.\n # See (https://goo.gl/yCySp4) for more details.\n fs = 100 # sampling rate\n fc = 5 # cutoff frequency\n Fco = fc / fs # normalized cutoff frequency\n alpha = 0.196202\n N = int(np.ceil(np.sqrt(alpha + Fco ** 2) / Fco))\n\n # Plot the example data\n plt.figure()\n plt.plot(t, x)\n\n # Create a data stash!\n pzt = Stash(N)\n\n # Add a bunch of samples to the stash.\n for t_, x_ in zip(t, x):\n pzt.add(t_, x_)\n\n # Now plot the resulting filtered data.\n t_, x_ = pzt.t_filtered, pzt.x_filtered\n plt.plot(t_, x_)\n\n # Note also that you 
can sample from the object because it's a generator.\n t0, x0 = next(pzt.sample)\n t1, x1 = next(pzt.sample)\n # ... and so on\n",
"<docstring token>\nfrom vessel import Vessel\nfrom datetime import datetime\nimport numpy as np\nfrom collections import deque\n\n\nclass Stash(object):\n \"\"\"Store data and filter it.\"\"\"\n\n def __init__(self, nb_taps: int=5, demand_uniqueness: bool=True,\n do_filter=True, save_data=False):\n self.do_filter = do_filter\n self.save_data = save_data\n self.demand_uniqueness = demand_uniqueness\n self.M = M = nb_taps\n self.p = p = int((M - 1) / 2)\n self.q = p + 1\n self.x = deque([], maxlen=1000)\n self.t = deque([], maxlen=1000)\n self.t_ = deque([], maxlen=1000)\n self.x_ = deque([], maxlen=1000)\n self.x_prev = 0\n self.t_filtered = deque([], maxlen=1000)\n self.x_filtered = deque([], maxlen=1000)\n if self.save_data:\n datestring = datetime.now().strftime('%Y.%m.%d.%H.%M')\n self.store = Vessel(f'data/{datestring}.dat')\n self.store.t = []\n self.store.x = []\n\n def add(self, t, x):\n \"\"\"Add new point.\"\"\"\n if self.demand_uniqueness:\n if len(self.x) > 0:\n if self.x[-1] != x:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n if len(self.x) >= self.M and self.do_filter:\n self.filter()\n\n def save_to_store(self, t, x):\n if self.save_data:\n self.store.t.append(t)\n self.store.x.append(x)\n if np.mod(len(self.store.t), 1000) == 0:\n self.store.save()\n\n def filter(self):\n \"\"\"Super efficient moving average filter.\"\"\"\n M, p, q = self.M, self.p, self.q\n x = self.x\n idx = len(self.x) - (p + 1)\n x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M\n self.t_.append(self.t[idx])\n self.t_filtered.append(self.t[idx])\n self.x_.append(x_)\n self.x_filtered.append(x_)\n self.x_prev = x_\n\n @property\n def sample(self):\n \"\"\"Return first observed pair (t, x), still in queue.\"\"\"\n if self.do_filter:\n if len(self.t_filtered) > 0:\n yield self.t_filtered.popleft(), self.x_filtered.popleft()\n else:\n yield None, None\n elif len(self.t) > 0:\n yield self.t.popleft(), self.x.popleft()\n else:\n yield None, None\n\n\nif __name__ == '__main__':\n import pylab as plt\n plt.ion()\n plt.close('all')\n t = np.linspace(0, 10, 1000)\n x = np.sin(2 * np.pi * t / 3) + 0.05 * np.random.randn(1000) + 15\n fs = 100\n fc = 5\n Fco = fc / fs\n alpha = 0.196202\n N = int(np.ceil(np.sqrt(alpha + Fco ** 2) / Fco))\n plt.figure()\n plt.plot(t, x)\n pzt = Stash(N)\n for t_, x_ in zip(t, x):\n pzt.add(t_, x_)\n t_, x_ = pzt.t_filtered, pzt.x_filtered\n plt.plot(t_, x_)\n t0, x0 = next(pzt.sample)\n t1, x1 = next(pzt.sample)\n",
"<docstring token>\n<import token>\n\n\nclass Stash(object):\n \"\"\"Store data and filter it.\"\"\"\n\n def __init__(self, nb_taps: int=5, demand_uniqueness: bool=True,\n do_filter=True, save_data=False):\n self.do_filter = do_filter\n self.save_data = save_data\n self.demand_uniqueness = demand_uniqueness\n self.M = M = nb_taps\n self.p = p = int((M - 1) / 2)\n self.q = p + 1\n self.x = deque([], maxlen=1000)\n self.t = deque([], maxlen=1000)\n self.t_ = deque([], maxlen=1000)\n self.x_ = deque([], maxlen=1000)\n self.x_prev = 0\n self.t_filtered = deque([], maxlen=1000)\n self.x_filtered = deque([], maxlen=1000)\n if self.save_data:\n datestring = datetime.now().strftime('%Y.%m.%d.%H.%M')\n self.store = Vessel(f'data/{datestring}.dat')\n self.store.t = []\n self.store.x = []\n\n def add(self, t, x):\n \"\"\"Add new point.\"\"\"\n if self.demand_uniqueness:\n if len(self.x) > 0:\n if self.x[-1] != x:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n if len(self.x) >= self.M and self.do_filter:\n self.filter()\n\n def save_to_store(self, t, x):\n if self.save_data:\n self.store.t.append(t)\n self.store.x.append(x)\n if np.mod(len(self.store.t), 1000) == 0:\n self.store.save()\n\n def filter(self):\n \"\"\"Super efficient moving average filter.\"\"\"\n M, p, q = self.M, self.p, self.q\n x = self.x\n idx = len(self.x) - (p + 1)\n x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M\n self.t_.append(self.t[idx])\n self.t_filtered.append(self.t[idx])\n self.x_.append(x_)\n self.x_filtered.append(x_)\n self.x_prev = x_\n\n @property\n def sample(self):\n \"\"\"Return first observed pair (t, x), still in queue.\"\"\"\n if self.do_filter:\n if len(self.t_filtered) > 0:\n yield self.t_filtered.popleft(), self.x_filtered.popleft()\n else:\n yield None, None\n elif len(self.t) > 0:\n yield self.t.popleft(), self.x.popleft()\n else:\n yield None, None\n\n\nif __name__ == '__main__':\n import pylab as plt\n plt.ion()\n plt.close('all')\n t = np.linspace(0, 10, 1000)\n x = np.sin(2 * np.pi * t / 3) + 0.05 * np.random.randn(1000) + 15\n fs = 100\n fc = 5\n Fco = fc / fs\n alpha = 0.196202\n N = int(np.ceil(np.sqrt(alpha + Fco ** 2) / Fco))\n plt.figure()\n plt.plot(t, x)\n pzt = Stash(N)\n for t_, x_ in zip(t, x):\n pzt.add(t_, x_)\n t_, x_ = pzt.t_filtered, pzt.x_filtered\n plt.plot(t_, x_)\n t0, x0 = next(pzt.sample)\n t1, x1 = next(pzt.sample)\n",
"<docstring token>\n<import token>\n\n\nclass Stash(object):\n \"\"\"Store data and filter it.\"\"\"\n\n def __init__(self, nb_taps: int=5, demand_uniqueness: bool=True,\n do_filter=True, save_data=False):\n self.do_filter = do_filter\n self.save_data = save_data\n self.demand_uniqueness = demand_uniqueness\n self.M = M = nb_taps\n self.p = p = int((M - 1) / 2)\n self.q = p + 1\n self.x = deque([], maxlen=1000)\n self.t = deque([], maxlen=1000)\n self.t_ = deque([], maxlen=1000)\n self.x_ = deque([], maxlen=1000)\n self.x_prev = 0\n self.t_filtered = deque([], maxlen=1000)\n self.x_filtered = deque([], maxlen=1000)\n if self.save_data:\n datestring = datetime.now().strftime('%Y.%m.%d.%H.%M')\n self.store = Vessel(f'data/{datestring}.dat')\n self.store.t = []\n self.store.x = []\n\n def add(self, t, x):\n \"\"\"Add new point.\"\"\"\n if self.demand_uniqueness:\n if len(self.x) > 0:\n if self.x[-1] != x:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n if len(self.x) >= self.M and self.do_filter:\n self.filter()\n\n def save_to_store(self, t, x):\n if self.save_data:\n self.store.t.append(t)\n self.store.x.append(x)\n if np.mod(len(self.store.t), 1000) == 0:\n self.store.save()\n\n def filter(self):\n \"\"\"Super efficient moving average filter.\"\"\"\n M, p, q = self.M, self.p, self.q\n x = self.x\n idx = len(self.x) - (p + 1)\n x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M\n self.t_.append(self.t[idx])\n self.t_filtered.append(self.t[idx])\n self.x_.append(x_)\n self.x_filtered.append(x_)\n self.x_prev = x_\n\n @property\n def sample(self):\n \"\"\"Return first observed pair (t, x), still in queue.\"\"\"\n if self.do_filter:\n if len(self.t_filtered) > 0:\n yield self.t_filtered.popleft(), self.x_filtered.popleft()\n else:\n yield None, None\n elif len(self.t) > 0:\n yield self.t.popleft(), self.x.popleft()\n else:\n yield None, None\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Stash(object):\n <docstring token>\n\n def __init__(self, nb_taps: int=5, demand_uniqueness: bool=True,\n do_filter=True, save_data=False):\n self.do_filter = do_filter\n self.save_data = save_data\n self.demand_uniqueness = demand_uniqueness\n self.M = M = nb_taps\n self.p = p = int((M - 1) / 2)\n self.q = p + 1\n self.x = deque([], maxlen=1000)\n self.t = deque([], maxlen=1000)\n self.t_ = deque([], maxlen=1000)\n self.x_ = deque([], maxlen=1000)\n self.x_prev = 0\n self.t_filtered = deque([], maxlen=1000)\n self.x_filtered = deque([], maxlen=1000)\n if self.save_data:\n datestring = datetime.now().strftime('%Y.%m.%d.%H.%M')\n self.store = Vessel(f'data/{datestring}.dat')\n self.store.t = []\n self.store.x = []\n\n def add(self, t, x):\n \"\"\"Add new point.\"\"\"\n if self.demand_uniqueness:\n if len(self.x) > 0:\n if self.x[-1] != x:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n if len(self.x) >= self.M and self.do_filter:\n self.filter()\n\n def save_to_store(self, t, x):\n if self.save_data:\n self.store.t.append(t)\n self.store.x.append(x)\n if np.mod(len(self.store.t), 1000) == 0:\n self.store.save()\n\n def filter(self):\n \"\"\"Super efficient moving average filter.\"\"\"\n M, p, q = self.M, self.p, self.q\n x = self.x\n idx = len(self.x) - (p + 1)\n x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M\n self.t_.append(self.t[idx])\n self.t_filtered.append(self.t[idx])\n self.x_.append(x_)\n self.x_filtered.append(x_)\n self.x_prev = x_\n\n @property\n def sample(self):\n \"\"\"Return first observed pair (t, x), still in queue.\"\"\"\n if self.do_filter:\n if len(self.t_filtered) > 0:\n yield self.t_filtered.popleft(), self.x_filtered.popleft()\n else:\n yield None, None\n elif len(self.t) > 0:\n yield self.t.popleft(), self.x.popleft()\n else:\n yield None, None\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Stash(object):\n <docstring token>\n\n def __init__(self, nb_taps: int=5, demand_uniqueness: bool=True,\n do_filter=True, save_data=False):\n self.do_filter = do_filter\n self.save_data = save_data\n self.demand_uniqueness = demand_uniqueness\n self.M = M = nb_taps\n self.p = p = int((M - 1) / 2)\n self.q = p + 1\n self.x = deque([], maxlen=1000)\n self.t = deque([], maxlen=1000)\n self.t_ = deque([], maxlen=1000)\n self.x_ = deque([], maxlen=1000)\n self.x_prev = 0\n self.t_filtered = deque([], maxlen=1000)\n self.x_filtered = deque([], maxlen=1000)\n if self.save_data:\n datestring = datetime.now().strftime('%Y.%m.%d.%H.%M')\n self.store = Vessel(f'data/{datestring}.dat')\n self.store.t = []\n self.store.x = []\n\n def add(self, t, x):\n \"\"\"Add new point.\"\"\"\n if self.demand_uniqueness:\n if len(self.x) > 0:\n if self.x[-1] != x:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n if len(self.x) >= self.M and self.do_filter:\n self.filter()\n <function token>\n\n def filter(self):\n \"\"\"Super efficient moving average filter.\"\"\"\n M, p, q = self.M, self.p, self.q\n x = self.x\n idx = len(self.x) - (p + 1)\n x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M\n self.t_.append(self.t[idx])\n self.t_filtered.append(self.t[idx])\n self.x_.append(x_)\n self.x_filtered.append(x_)\n self.x_prev = x_\n\n @property\n def sample(self):\n \"\"\"Return first observed pair (t, x), still in queue.\"\"\"\n if self.do_filter:\n if len(self.t_filtered) > 0:\n yield self.t_filtered.popleft(), self.x_filtered.popleft()\n else:\n yield None, None\n elif len(self.t) > 0:\n yield self.t.popleft(), self.x.popleft()\n else:\n yield None, None\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Stash(object):\n <docstring token>\n <function token>\n\n def add(self, t, x):\n \"\"\"Add new point.\"\"\"\n if self.demand_uniqueness:\n if len(self.x) > 0:\n if self.x[-1] != x:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n if len(self.x) >= self.M and self.do_filter:\n self.filter()\n <function token>\n\n def filter(self):\n \"\"\"Super efficient moving average filter.\"\"\"\n M, p, q = self.M, self.p, self.q\n x = self.x\n idx = len(self.x) - (p + 1)\n x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M\n self.t_.append(self.t[idx])\n self.t_filtered.append(self.t[idx])\n self.x_.append(x_)\n self.x_filtered.append(x_)\n self.x_prev = x_\n\n @property\n def sample(self):\n \"\"\"Return first observed pair (t, x), still in queue.\"\"\"\n if self.do_filter:\n if len(self.t_filtered) > 0:\n yield self.t_filtered.popleft(), self.x_filtered.popleft()\n else:\n yield None, None\n elif len(self.t) > 0:\n yield self.t.popleft(), self.x.popleft()\n else:\n yield None, None\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Stash(object):\n <docstring token>\n <function token>\n\n def add(self, t, x):\n \"\"\"Add new point.\"\"\"\n if self.demand_uniqueness:\n if len(self.x) > 0:\n if self.x[-1] != x:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n else:\n self.t.append(t)\n self.x.append(x)\n self.save_to_store(t, x)\n if len(self.x) >= self.M and self.do_filter:\n self.filter()\n <function token>\n <function token>\n\n @property\n def sample(self):\n \"\"\"Return first observed pair (t, x), still in queue.\"\"\"\n if self.do_filter:\n if len(self.t_filtered) > 0:\n yield self.t_filtered.popleft(), self.x_filtered.popleft()\n else:\n yield None, None\n elif len(self.t) > 0:\n yield self.t.popleft(), self.x.popleft()\n else:\n yield None, None\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Stash(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def sample(self):\n \"\"\"Return first observed pair (t, x), still in queue.\"\"\"\n if self.do_filter:\n if len(self.t_filtered) > 0:\n yield self.t_filtered.popleft(), self.x_filtered.popleft()\n else:\n yield None, None\n elif len(self.t) > 0:\n yield self.t.popleft(), self.x.popleft()\n else:\n yield None, None\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Stash(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n<code token>\n"
] | false |
99,510 |
e0460ad5579f53fa9a310dd448976733e9047ed2
|
import unittest
import springer_link_csv_to_bibtex_parser
import tempfile
import shutil
import filecmp
from os import path
class TestSplitCamelCaseJoinedNames(unittest.TestCase):
def test_regular_joined_camel_case_names(self):
split_name = springer_link_csv_to_bibtex_parser.split_camel_case_joined_names("JohnMarkPeter")
self.assertEqual(split_name, ["John", "Mark", "Peter"])
def test_lower_case_first_name_in_camel_case_joined_names(self):
split_name = springer_link_csv_to_bibtex_parser.split_camel_case_joined_names("johnMarkPeter")
self.assertEqual(split_name, ["john", "Mark", "Peter"])
def test_accented_characters_in_camel_case_joined_name(self):
split_name = springer_link_csv_to_bibtex_parser.split_camel_case_joined_names("JoãoAdriánFrançois")
self.assertEqual(split_name, ["João", "Adrián", "François"])
class TestJoinNamesAsCamelCase(unittest.TestCase):
def test_regular_name(self):
camel_case_joined_name = springer_link_csv_to_bibtex_parser.join_names_as_camel_case("Sally Carter")
self.assertEquals(camel_case_joined_name, "sallyCarter")
def test_triple_name(self):
camel_case_joined_name = springer_link_csv_to_bibtex_parser.join_names_as_camel_case("John James Peter")
self.assertEquals(camel_case_joined_name, "johnJamesPeter")
def test_accented_characters_in_names(self):
camel_case_joined_name = springer_link_csv_to_bibtex_parser.join_names_as_camel_case("Zoë Noël")
self.assertEquals(camel_case_joined_name, "zoëNoël")
class TestConvertCsvToBibtex(unittest.TestCase):
def setUp(self):
self.test_directory = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_directory)
def test_single_csv_to_bibtex_entry(self):
test_single_bibtex_entry = path.join(self.test_directory, "test_single_bibtex_entry.bib")
expected_single_bibtex_entry = "gold_standard_bibtex_files/single_bibtex_entry.bib"
parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser("test_csv_files/single_csv_entry.csv",
test_single_bibtex_entry)
parser.convert_csv_to_bibtex()
self.assertTrue(filecmp.cmp(test_single_bibtex_entry, expected_single_bibtex_entry), "Files do not match")
def test_multiple_csv_to_bibtex_entry(self):
test_multiple_bibtex_entries = path.join(self.test_directory, "test_multiple_bibtex_entries.bib")
expected_multiple_bibtex_entries = "gold_standard_bibtex_files/multiple_bibtex_entries.bib"
parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser("test_csv_files/multiple_csv_entries.csv",
test_multiple_bibtex_entries)
parser.convert_csv_to_bibtex()
self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries, expected_multiple_bibtex_entries),
"Files do not match")
if __name__ == '__main__':
unittest.main()
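
# The module under test is not included in this row; a minimal sketch of the
# two helpers, assuming a simple uppercase-boundary split (hypothetical
# implementation, only meant to satisfy the test cases above):
#
# import re
#
# def split_camel_case_joined_names(joined_names):
#     # "johnMarkPeter" -> ["john", "Mark", "Peter"]; accented lowercase
#     # letters (ã, á, ç, ...) fall into [^A-Z] and stay inside a name.
#     return re.findall(r"[A-Z]?[^A-Z]+", joined_names)
#
# def join_names_as_camel_case(names):
#     # "Sally Carter" -> "sallyCarter"
#     parts = names.split()
#     return parts[0].lower() + "".join(p[0].upper() + p[1:] for p in parts[1:])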
|
[
"import unittest\nimport springer_link_csv_to_bibtex_parser\nimport tempfile\nimport shutil\nimport filecmp\nfrom os import path\n\n\nclass TestSplitCamelCaseJoinedNames(unittest.TestCase):\n\n def test_regular_joined_camel_case_names(self):\n split_name = springer_link_csv_to_bibtex_parser.split_camel_case_joined_names(\"JohnMarkPeter\")\n self.assertEqual(split_name, [\"John\", \"Mark\", \"Peter\"])\n\n def test_lower_case_first_name_in_camel_case_joined_names(self):\n split_name = springer_link_csv_to_bibtex_parser.split_camel_case_joined_names(\"johnMarkPeter\")\n self.assertEqual(split_name, [\"john\", \"Mark\", \"Peter\"])\n\n def test_accented_characters_in_camel_case_joined_name(self):\n split_name = springer_link_csv_to_bibtex_parser.split_camel_case_joined_names(\"JoãoAdriánFrançois\")\n self.assertEqual(split_name, [\"João\", \"Adrián\", \"François\"])\n\n\nclass TestJoinNamesAsCamelCase(unittest.TestCase):\n\n def test_regular_name(self):\n camel_case_joined_name = springer_link_csv_to_bibtex_parser.join_names_as_camel_case(\"Sally Carter\")\n self.assertEquals(camel_case_joined_name, \"sallyCarter\")\n\n def test_triple_name(self):\n camel_case_joined_name = springer_link_csv_to_bibtex_parser.join_names_as_camel_case(\"John James Peter\")\n self.assertEquals(camel_case_joined_name, \"johnJamesPeter\")\n\n def test_accented_characters_in_names(self):\n camel_case_joined_name = springer_link_csv_to_bibtex_parser.join_names_as_camel_case(\"Zoë Noël\")\n self.assertEquals(camel_case_joined_name, \"zoëNoël\")\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory, \"test_single_bibtex_entry.bib\")\n expected_single_bibtex_entry = \"gold_standard_bibtex_files/single_bibtex_entry.bib\"\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\"test_csv_files/single_csv_entry.csv\",\n test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry, expected_single_bibtex_entry), \"Files do not match\")\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory, \"test_multiple_bibtex_entries.bib\")\n expected_multiple_bibtex_entries = \"gold_standard_bibtex_files/multiple_bibtex_entries.bib\"\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\"test_csv_files/multiple_csv_entries.csv\",\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries, expected_multiple_bibtex_entries),\n \"Files do not match\")\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"import unittest\nimport springer_link_csv_to_bibtex_parser\nimport tempfile\nimport shutil\nimport filecmp\nfrom os import path\n\n\nclass TestSplitCamelCaseJoinedNames(unittest.TestCase):\n\n def test_regular_joined_camel_case_names(self):\n split_name = (springer_link_csv_to_bibtex_parser.\n split_camel_case_joined_names('JohnMarkPeter'))\n self.assertEqual(split_name, ['John', 'Mark', 'Peter'])\n\n def test_lower_case_first_name_in_camel_case_joined_names(self):\n split_name = (springer_link_csv_to_bibtex_parser.\n split_camel_case_joined_names('johnMarkPeter'))\n self.assertEqual(split_name, ['john', 'Mark', 'Peter'])\n\n def test_accented_characters_in_camel_case_joined_name(self):\n split_name = (springer_link_csv_to_bibtex_parser.\n split_camel_case_joined_names('JoãoAdriánFrançois'))\n self.assertEqual(split_name, ['João', 'Adrián', 'François'])\n\n\nclass TestJoinNamesAsCamelCase(unittest.TestCase):\n\n def test_regular_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Sally Carter'))\n self.assertEquals(camel_case_joined_name, 'sallyCarter')\n\n def test_triple_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('John James Peter'))\n self.assertEquals(camel_case_joined_name, 'johnJamesPeter')\n\n def test_accented_characters_in_names(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Zoë Noël'))\n self.assertEquals(camel_case_joined_name, 'zoëNoël')\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory,\n 'test_single_bibtex_entry.bib')\n expected_single_bibtex_entry = (\n 'gold_standard_bibtex_files/single_bibtex_entry.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/single_csv_entry.csv', test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry,\n expected_single_bibtex_entry), 'Files do not match')\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass TestSplitCamelCaseJoinedNames(unittest.TestCase):\n\n def test_regular_joined_camel_case_names(self):\n split_name = (springer_link_csv_to_bibtex_parser.\n split_camel_case_joined_names('JohnMarkPeter'))\n self.assertEqual(split_name, ['John', 'Mark', 'Peter'])\n\n def test_lower_case_first_name_in_camel_case_joined_names(self):\n split_name = (springer_link_csv_to_bibtex_parser.\n split_camel_case_joined_names('johnMarkPeter'))\n self.assertEqual(split_name, ['john', 'Mark', 'Peter'])\n\n def test_accented_characters_in_camel_case_joined_name(self):\n split_name = (springer_link_csv_to_bibtex_parser.\n split_camel_case_joined_names('JoãoAdriánFrançois'))\n self.assertEqual(split_name, ['João', 'Adrián', 'François'])\n\n\nclass TestJoinNamesAsCamelCase(unittest.TestCase):\n\n def test_regular_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Sally Carter'))\n self.assertEquals(camel_case_joined_name, 'sallyCarter')\n\n def test_triple_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('John James Peter'))\n self.assertEquals(camel_case_joined_name, 'johnJamesPeter')\n\n def test_accented_characters_in_names(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Zoë Noël'))\n self.assertEquals(camel_case_joined_name, 'zoëNoël')\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory,\n 'test_single_bibtex_entry.bib')\n expected_single_bibtex_entry = (\n 'gold_standard_bibtex_files/single_bibtex_entry.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/single_csv_entry.csv', test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry,\n expected_single_bibtex_entry), 'Files do not match')\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass TestSplitCamelCaseJoinedNames(unittest.TestCase):\n\n def test_regular_joined_camel_case_names(self):\n split_name = (springer_link_csv_to_bibtex_parser.\n split_camel_case_joined_names('JohnMarkPeter'))\n self.assertEqual(split_name, ['John', 'Mark', 'Peter'])\n\n def test_lower_case_first_name_in_camel_case_joined_names(self):\n split_name = (springer_link_csv_to_bibtex_parser.\n split_camel_case_joined_names('johnMarkPeter'))\n self.assertEqual(split_name, ['john', 'Mark', 'Peter'])\n\n def test_accented_characters_in_camel_case_joined_name(self):\n split_name = (springer_link_csv_to_bibtex_parser.\n split_camel_case_joined_names('JoãoAdriánFrançois'))\n self.assertEqual(split_name, ['João', 'Adrián', 'François'])\n\n\nclass TestJoinNamesAsCamelCase(unittest.TestCase):\n\n def test_regular_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Sally Carter'))\n self.assertEquals(camel_case_joined_name, 'sallyCarter')\n\n def test_triple_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('John James Peter'))\n self.assertEquals(camel_case_joined_name, 'johnJamesPeter')\n\n def test_accented_characters_in_names(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Zoë Noël'))\n self.assertEquals(camel_case_joined_name, 'zoëNoël')\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory,\n 'test_single_bibtex_entry.bib')\n expected_single_bibtex_entry = (\n 'gold_standard_bibtex_files/single_bibtex_entry.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/single_csv_entry.csv', test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry,\n expected_single_bibtex_entry), 'Files do not match')\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\n<code token>\n",
"<import token>\n\n\nclass TestSplitCamelCaseJoinedNames(unittest.TestCase):\n\n def test_regular_joined_camel_case_names(self):\n split_name = (springer_link_csv_to_bibtex_parser.\n split_camel_case_joined_names('JohnMarkPeter'))\n self.assertEqual(split_name, ['John', 'Mark', 'Peter'])\n\n def test_lower_case_first_name_in_camel_case_joined_names(self):\n split_name = (springer_link_csv_to_bibtex_parser.\n split_camel_case_joined_names('johnMarkPeter'))\n self.assertEqual(split_name, ['john', 'Mark', 'Peter'])\n <function token>\n\n\nclass TestJoinNamesAsCamelCase(unittest.TestCase):\n\n def test_regular_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Sally Carter'))\n self.assertEquals(camel_case_joined_name, 'sallyCarter')\n\n def test_triple_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('John James Peter'))\n self.assertEquals(camel_case_joined_name, 'johnJamesPeter')\n\n def test_accented_characters_in_names(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Zoë Noël'))\n self.assertEquals(camel_case_joined_name, 'zoëNoël')\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory,\n 'test_single_bibtex_entry.bib')\n expected_single_bibtex_entry = (\n 'gold_standard_bibtex_files/single_bibtex_entry.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/single_csv_entry.csv', test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry,\n expected_single_bibtex_entry), 'Files do not match')\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\n<code token>\n",
"<import token>\n\n\nclass TestSplitCamelCaseJoinedNames(unittest.TestCase):\n <function token>\n\n def test_lower_case_first_name_in_camel_case_joined_names(self):\n split_name = (springer_link_csv_to_bibtex_parser.\n split_camel_case_joined_names('johnMarkPeter'))\n self.assertEqual(split_name, ['john', 'Mark', 'Peter'])\n <function token>\n\n\nclass TestJoinNamesAsCamelCase(unittest.TestCase):\n\n def test_regular_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Sally Carter'))\n self.assertEquals(camel_case_joined_name, 'sallyCarter')\n\n def test_triple_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('John James Peter'))\n self.assertEquals(camel_case_joined_name, 'johnJamesPeter')\n\n def test_accented_characters_in_names(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Zoë Noël'))\n self.assertEquals(camel_case_joined_name, 'zoëNoël')\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory,\n 'test_single_bibtex_entry.bib')\n expected_single_bibtex_entry = (\n 'gold_standard_bibtex_files/single_bibtex_entry.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/single_csv_entry.csv', test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry,\n expected_single_bibtex_entry), 'Files do not match')\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\n<code token>\n",
"<import token>\n\n\nclass TestSplitCamelCaseJoinedNames(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n\nclass TestJoinNamesAsCamelCase(unittest.TestCase):\n\n def test_regular_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Sally Carter'))\n self.assertEquals(camel_case_joined_name, 'sallyCarter')\n\n def test_triple_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('John James Peter'))\n self.assertEquals(camel_case_joined_name, 'johnJamesPeter')\n\n def test_accented_characters_in_names(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Zoë Noël'))\n self.assertEquals(camel_case_joined_name, 'zoëNoël')\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory,\n 'test_single_bibtex_entry.bib')\n expected_single_bibtex_entry = (\n 'gold_standard_bibtex_files/single_bibtex_entry.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/single_csv_entry.csv', test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry,\n expected_single_bibtex_entry), 'Files do not match')\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestJoinNamesAsCamelCase(unittest.TestCase):\n\n def test_regular_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Sally Carter'))\n self.assertEquals(camel_case_joined_name, 'sallyCarter')\n\n def test_triple_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('John James Peter'))\n self.assertEquals(camel_case_joined_name, 'johnJamesPeter')\n\n def test_accented_characters_in_names(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Zoë Noël'))\n self.assertEquals(camel_case_joined_name, 'zoëNoël')\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory,\n 'test_single_bibtex_entry.bib')\n expected_single_bibtex_entry = (\n 'gold_standard_bibtex_files/single_bibtex_entry.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/single_csv_entry.csv', test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry,\n expected_single_bibtex_entry), 'Files do not match')\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestJoinNamesAsCamelCase(unittest.TestCase):\n\n def test_regular_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Sally Carter'))\n self.assertEquals(camel_case_joined_name, 'sallyCarter')\n\n def test_triple_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('John James Peter'))\n self.assertEquals(camel_case_joined_name, 'johnJamesPeter')\n <function token>\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory,\n 'test_single_bibtex_entry.bib')\n expected_single_bibtex_entry = (\n 'gold_standard_bibtex_files/single_bibtex_entry.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/single_csv_entry.csv', test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry,\n expected_single_bibtex_entry), 'Files do not match')\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestJoinNamesAsCamelCase(unittest.TestCase):\n\n def test_regular_name(self):\n camel_case_joined_name = (springer_link_csv_to_bibtex_parser.\n join_names_as_camel_case('Sally Carter'))\n self.assertEquals(camel_case_joined_name, 'sallyCarter')\n <function token>\n <function token>\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory,\n 'test_single_bibtex_entry.bib')\n expected_single_bibtex_entry = (\n 'gold_standard_bibtex_files/single_bibtex_entry.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/single_csv_entry.csv', test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry,\n expected_single_bibtex_entry), 'Files do not match')\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass TestJoinNamesAsCamelCase(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory,\n 'test_single_bibtex_entry.bib')\n expected_single_bibtex_entry = (\n 'gold_standard_bibtex_files/single_bibtex_entry.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/single_csv_entry.csv', test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry,\n expected_single_bibtex_entry), 'Files do not match')\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_directory)\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory,\n 'test_single_bibtex_entry.bib')\n expected_single_bibtex_entry = (\n 'gold_standard_bibtex_files/single_bibtex_entry.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/single_csv_entry.csv', test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry,\n expected_single_bibtex_entry), 'Files do not match')\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n <function token>\n\n def test_single_csv_to_bibtex_entry(self):\n test_single_bibtex_entry = path.join(self.test_directory,\n 'test_single_bibtex_entry.bib')\n expected_single_bibtex_entry = (\n 'gold_standard_bibtex_files/single_bibtex_entry.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/single_csv_entry.csv', test_single_bibtex_entry)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_single_bibtex_entry,\n expected_single_bibtex_entry), 'Files do not match')\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n\n def setUp(self):\n self.test_directory = tempfile.mkdtemp()\n <function token>\n <function token>\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n def test_multiple_csv_to_bibtex_entry(self):\n test_multiple_bibtex_entries = path.join(self.test_directory,\n 'test_multiple_bibtex_entries.bib')\n expected_multiple_bibtex_entries = (\n 'gold_standard_bibtex_files/multiple_bibtex_entries.bib')\n parser = springer_link_csv_to_bibtex_parser.CsvToBibtexParser(\n 'test_csv_files/multiple_csv_entries.csv',\n test_multiple_bibtex_entries)\n parser.convert_csv_to_bibtex()\n self.assertTrue(filecmp.cmp(test_multiple_bibtex_entries,\n expected_multiple_bibtex_entries), 'Files do not match')\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n\n\nclass TestConvertCsvToBibtex(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<code token>\n"
] | false |
99,511 |
39798e390e9cdc1721da5c3a0e3c8fadc02193b8
|
import soporte
import parte1
v = soporte.vector_known_range(300000)
c = [0] * 300000
def contar(vector):
    """Count how many counting bins in c end up non-empty, i.e. how many
    distinct values occur in vector."""
    contador_casillas_no_vacias = 0
    for x in vector:
        c[x] += 1  # c is the module-level counting array
    for num in c:
        if num != 0:
            contador_casillas_no_vacias += 1
    return contador_casillas_no_vacias
mas_freq, index_mas_freq = parte1.mas_frequente(v)
print('5.', contar(v))
print('6.', mas_freq)
print('7.', index_mas_freq)
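
# soporte and parte1 are local helper modules that are not included in this
# row; a self-contained equivalent of contar(), assuming only that the values
# are hashable (no fixed 0..300000 range needed):
from collections import Counter

def contar_standalone(vector):
    # The number of distinct values equals the number of non-empty bins.
    return len(Counter(vector))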
|
[
"import soporte\nimport parte1\n\nv = soporte.vector_known_range(300000)\nc = [0] * 300000\n\n\ndef contar(vector):\n\tcontador_casillas_no_vacias = 0\n\tfor x in vector:\n\t\tc[x] += 1\n\n\tfor num in c:\n\t\tif num != 0:\n\t\t\tcontador_casillas_no_vacias += 1\n\treturn contador_casillas_no_vacias\n\n\nmas_freq, index_mas_freq = parte1.mas_frequente(v)\n\n\nprint('5.', contar(v))\nprint('6.', mas_freq)\nprint('7.', index_mas_freq)\n",
"import soporte\nimport parte1\nv = soporte.vector_known_range(300000)\nc = [0] * 300000\n\n\ndef contar(vector):\n contador_casillas_no_vacias = 0\n for x in vector:\n c[x] += 1\n for num in c:\n if num != 0:\n contador_casillas_no_vacias += 1\n return contador_casillas_no_vacias\n\n\nmas_freq, index_mas_freq = parte1.mas_frequente(v)\nprint('5.', contar(v))\nprint('6.', mas_freq)\nprint('7.', index_mas_freq)\n",
"<import token>\nv = soporte.vector_known_range(300000)\nc = [0] * 300000\n\n\ndef contar(vector):\n contador_casillas_no_vacias = 0\n for x in vector:\n c[x] += 1\n for num in c:\n if num != 0:\n contador_casillas_no_vacias += 1\n return contador_casillas_no_vacias\n\n\nmas_freq, index_mas_freq = parte1.mas_frequente(v)\nprint('5.', contar(v))\nprint('6.', mas_freq)\nprint('7.', index_mas_freq)\n",
"<import token>\n<assignment token>\n\n\ndef contar(vector):\n contador_casillas_no_vacias = 0\n for x in vector:\n c[x] += 1\n for num in c:\n if num != 0:\n contador_casillas_no_vacias += 1\n return contador_casillas_no_vacias\n\n\n<assignment token>\nprint('5.', contar(v))\nprint('6.', mas_freq)\nprint('7.', index_mas_freq)\n",
"<import token>\n<assignment token>\n\n\ndef contar(vector):\n contador_casillas_no_vacias = 0\n for x in vector:\n c[x] += 1\n for num in c:\n if num != 0:\n contador_casillas_no_vacias += 1\n return contador_casillas_no_vacias\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
99,512 |
1f97ab7cb40ff9555f5b33c6107c89ed7ab058b5
|
# The + operator is overloaded: it adds numbers, concatenates strings,
# and concatenates lists.
x = 10
y = 20
print(x + y)    # 30

s1 = 'Hello'
s2 = ' Rishabh'
print(s1 + s2)  # Hello Rishabh

l1 = [1, 2, 3, 4]
l2 = [5, 6, 7, 8]
print(l1 + l2)  # [1, 2, 3, 4, 5, 6, 7, 8]
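
# Note that + never mixes operand types: for example, x + s1 raises
# TypeError: unsupported operand type(s) for +: 'int' and 'str'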
|
[
"x=10\ny=20\n\nprint(x+y)\n\n\ns1='Hello'\ns2=' Rishabh'\n\nprint(s1+s2)\n\nl1=[1,2,3,4]\nl2=[5,6,7,8]\n\nprint(l1+l2)",
"x = 10\ny = 20\nprint(x + y)\ns1 = 'Hello'\ns2 = ' Rishabh'\nprint(s1 + s2)\nl1 = [1, 2, 3, 4]\nl2 = [5, 6, 7, 8]\nprint(l1 + l2)\n",
"<assignment token>\nprint(x + y)\n<assignment token>\nprint(s1 + s2)\n<assignment token>\nprint(l1 + l2)\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,513 |
e34bc7363cc05676e10f5d876c57e868989aa216
|
import copy
def decorator_deepcopy_arguments_and_return_value(f):
def f_wrapper(*args, **kwargs):
# deepcopy the arguments and keyword arguments
(copied_args, copied_kwargs) = tuple(
map(copy.deepcopy, (args, kwargs)))
# call the function
return_value = f(*copied_args, **copied_kwargs)
# deepcopy the return values
copied_return_value = copy.deepcopy(return_value)
return copied_return_value
return f_wrapper
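

# A minimal usage sketch (the decorated function below is hypothetical, not
# part of the original module): the deep copies keep the caller's arguments
# and the stored return value isolated from later mutation.
if __name__ == "__main__":

    @decorator_deepcopy_arguments_and_return_value
    def append_zero(values):
        values.append(0)  # mutates only the deep-copied argument
        return values

    data = [1, 2]
    result = append_zero(data)
    assert data == [1, 2]          # caller's list is untouched
    assert result == [1, 2, 0]     # returned copy reflects the mutation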
|
[
"\nimport copy\n\n\ndef decorator_deepcopy_arguments_and_return_value(f):\n def f_wrapper(*args, **kwargs):\n # deepcopy the arguments and keyword arguments\n (copied_args, copied_kwargs) = tuple(\n map(copy.deepcopy, (args, kwargs)))\n # call the function\n return_value = f(*copied_args, **copied_kwargs)\n\n # deepcopy the return values\n copied_return_value = copy.deepcopy(return_value)\n return copied_return_value\n\n return f_wrapper\n",
"import copy\n\n\ndef decorator_deepcopy_arguments_and_return_value(f):\n\n def f_wrapper(*args, **kwargs):\n copied_args, copied_kwargs = tuple(map(copy.deepcopy, (args, kwargs)))\n return_value = f(*copied_args, **copied_kwargs)\n copied_return_value = copy.deepcopy(return_value)\n return copied_return_value\n return f_wrapper\n",
"<import token>\n\n\ndef decorator_deepcopy_arguments_and_return_value(f):\n\n def f_wrapper(*args, **kwargs):\n copied_args, copied_kwargs = tuple(map(copy.deepcopy, (args, kwargs)))\n return_value = f(*copied_args, **copied_kwargs)\n copied_return_value = copy.deepcopy(return_value)\n return copied_return_value\n return f_wrapper\n",
"<import token>\n<function token>\n"
] | false |
99,514 |
07268ced7c67b2975138403e6b66689d3664bbe8
|
my_dict = {
'a':50,
'b':58,
'c':56,
'd':40,
'e':100,
'f':20
}
# Track the three largest values seen so far.
max1 = 0
max2 = 0
max3 = 0
list1 = []
for i in my_dict.values():
    list1.append(i)
j = 0
while j < len(list1):
    if list1[j] > max1:
        max3 = max2
        max2 = max1
        max1 = list1[j]
    elif list1[j] > max2:
        max3 = max2
        max2 = list1[j]
    elif list1[j] > max3:
        max3 = list1[j]
    j += 1
print(max1, max2, max3)
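
# The same result with the standard library (sanity check for the loop above):
# import heapq; heapq.nlargest(3, my_dict.values())  -> [100, 58, 56]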
|
[
"my_dict = {\n 'a':50, \n 'b':58, \n 'c':56,\n 'd':40, \n 'e':100, \n 'f':20\n }\nmax1=0\nmax2=2\nmax3=0\nlist1=[]\nfor i in my_dict.values():\n list1.append(i)\nj=0\nwhile j<len(list1):\n if list1[j]>max1:\n max2=max1\n max1=list1[j]\n if max1>list1[j]and max2<list1[j]:\n max2=max3\n max2=list1[j]\n if max3<max2 and max3<max1:\n max3=list1[j]\n max3=max2\n j+=1\nprint(max1,max2,max3)",
"my_dict = {'a': 50, 'b': 58, 'c': 56, 'd': 40, 'e': 100, 'f': 20}\nmax1 = 0\nmax2 = 2\nmax3 = 0\nlist1 = []\nfor i in my_dict.values():\n list1.append(i)\nj = 0\nwhile j < len(list1):\n if list1[j] > max1:\n max2 = max1\n max1 = list1[j]\n if max1 > list1[j] and max2 < list1[j]:\n max2 = max3\n max2 = list1[j]\n if max3 < max2 and max3 < max1:\n max3 = list1[j]\n max3 = max2\n j += 1\nprint(max1, max2, max3)\n",
"<assignment token>\nfor i in my_dict.values():\n list1.append(i)\n<assignment token>\nwhile j < len(list1):\n if list1[j] > max1:\n max2 = max1\n max1 = list1[j]\n if max1 > list1[j] and max2 < list1[j]:\n max2 = max3\n max2 = list1[j]\n if max3 < max2 and max3 < max1:\n max3 = list1[j]\n max3 = max2\n j += 1\nprint(max1, max2, max3)\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,515 |
5d2331c3777e54d87f2d57d5255333a83d1e60ac
|
import numpy as np
import matplotlib.pyplot as plt
# Input folder
#folder_path
# Open and read the log file (a with-block closes it automatically and we
# avoid shadowing the built-in name `file`)
with open("Motor_test.txt", "r") as log_file:
    content = log_file.read()
# Split the file into lines, then each line of numbers by comma
# (wrap map in list() so the sequences support len() and indexing on Python 3)
a = content.split("\n")
left_speeds = list(map(float, a[1].replace(" ", "").split(",")))
timeL = list(map(float, a[3].replace(" ", "").split(",")))
right_speeds = list(map(float, a[5].replace(" ", "").split(",")))
timeR = list(map(float, a[7].replace(" ", "").split(",")))
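
# Assumed Motor_test.txt layout, inferred from the indices above:
#   line 1: left wheel speeds, line 3: left timestamps,
#   line 5: right wheel speeds, line 7: right timestamps
# (each a comma-separated list of floats; the even lines are headers)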
diff_time_left=[]
for i in range(1, len(timeL)):
diff_time_left.append(timeL[i] - timeL[i-1])
diff_time_right=[]
for i in range(1, len(timeR)):
diff_time_right.append(timeR[i] - timeR[i-1])
diff_time = []
for i in range(min(len(timeL), len(timeR))):  # min() avoids an IndexError if lengths differ
    diff_time.append(timeL[i] - timeR[i])

speed_diff = []
for i in range(min(len(left_speeds), len(right_speeds))):
    speed_diff.append(left_speeds[i] - right_speeds[i])
# Cumulative distance per wheel: rectangle-rule integration of speed over
# time; the 0.1/6.0 factor is the original rad/s -> metres scaling.
sum_distance_left = [0]
sum_distance_right = [0]
for i in range(1, len(left_speeds)):
    sum_distance_left.append(left_speeds[i] * (timeL[i] - timeL[i - 1]) * 0.1 / 6.0 + sum_distance_left[i - 1])
    sum_distance_right.append(right_speeds[i] * (timeR[i] - timeR[i - 1]) * 0.1 / 6.0 + sum_distance_right[i - 1])
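
# A hypothetical vectorised cross-check of the loop above using numpy
# (not part of the original script):
# dist_left = np.concatenate(([0.0], np.cumsum(
#     np.array(left_speeds[1:]) * np.diff(timeL) * 0.1 / 6.0)))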
########################################################
## Evolution of the speed difference
########################################################
plt.xlabel('sample index')
plt.ylabel('Speed difference in rad/s')
plt.title('Evolution of the speed difference')
axis = np.linspace(0, len(speed_diff) - 1, len(speed_diff))
plt.plot(axis, speed_diff, label='speed difference (left - right)')
plt.legend()
plt.savefig('Evolution_de_la_difference_de_vitesse.png')
plt.clf()
########################################################
## Evolution of the left and right distances travelled
########################################################
plt.xlabel('Time in s')
plt.ylabel('Distance travelled in m')
plt.title('Evolution of the left and right distances travelled')
plt.plot(timeL, sum_distance_left, label='left distance')
plt.plot(timeR, sum_distance_right, label='right distance')
plt.legend()
plt.savefig('Evolution_des_distances_parcourus_gauche_et_droite.png')
plt.clf()
########################################################
## Evolution of the time flux
########################################################
plt.xlabel('sample index')
plt.ylabel('Time in s')
plt.title('Evolution of the time flux (first samples)')
length_of_plot = 10
axis = np.linspace(0, length_of_plot - 1, length_of_plot)
plt.plot(axis, timeL[0:length_of_plot], label='left time')
plt.plot(axis, timeR[0:length_of_plot], label='right time')
plt.legend()
plt.savefig('Evolution_of_time_flux.png')
plt.clf()
########################################################
## Evolution of the speed
########################################################
plt.xlabel('Time in s')
plt.ylabel('Speed in rad/s')
plt.title('Evolution of speed')
plt.plot(timeL, left_speeds,label='left speed')
plt.plot(timeR, right_speeds, label = 'right speed')
plt.legend()
plt.savefig('Evolution_of_speed.png')
plt.clf()
########################################################
## Difference of time
########################################################
axis_bis = np.linspace(0, len(diff_time_left) - 1, len(diff_time_left))
plt.xlabel('sample index')
plt.ylabel('Difference of time in s')
plt.title('Evolution of Difference of time')
plt.plot(axis_bis, diff_time_left, label='left difference of time')
plt.plot(axis_bis, diff_time_right, label='right difference of time')
plt.legend()
plt.savefig('Evolution of Difference of time.png')
plt.clf()
########################################################
## Difference of time between Left and right
########################################################
plt.xlabel('Time in s')
plt.ylabel('Difference of time in s')
plt.title('Difference of time')
plt.plot(range(len(diff_time)), diff_time)
plt.savefig('difference_of_time.png')
|
[
"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\n\n# Input folder\n#folder_path \n\n# Opening file\nfile = open(\"Motor_test.txt\",\"r\") \n\n# Reading the file\ncontent = file.read()\n\nfile.close()\n\n# Splitting each list of number first by \\n then by coma\na = content.split(\"\\n\")\nleft_speeds = map(float, (a[1].replace(\" \",\"\")).split(\",\"))\ntimeL = map(float, (a[3].replace(\" \",\"\")).split(\",\"))\nright_speeds = map(float, (a[5].replace(\" \",\"\")).split(\",\"))\ntimeR = map(float, (a[7].replace(\" \",\"\")).split(\",\"))\n\n\n\ndiff_time_left=[]\nfor i in range(1, len(timeL)):\n\tdiff_time_left.append(timeL[i] - timeL[i-1])\n\ndiff_time_right=[]\nfor i in range(1, len(timeR)):\n\tdiff_time_right.append(timeR[i] - timeR[i-1])\n\ndiff_time = []\n\nfor i in range(max(len(timeL), len(timeR))):\n\tdiff_time.append(timeL[i] - timeR[i])\n\nspeed_diff=[]\nfor i in range(max(len(left_speeds ), len(right_speeds ))):\n\tspeed_diff.append(left_speeds[i] - right_speeds[i])\n\n\nsum_distance_left=[]\nsum_distance_right=[]\nsum_distance_left.append(0)\nsum_distance_right.append(0)\nfor i in range(1,len(left_speeds)):\n\t#print(\"left_speed:\"+str(left_speeds[i]))\n\t#print(\"right_speed:\"+str(right_speeds[i]))\n\tsum_distance_left.append( ((left_speeds[i]*(timeL[i]-timeL[i-1])*0.1)/6.0) + sum_distance_left[i-1] )\n\tsum_distance_right.append( ((right_speeds[i]*(timeR[i]-timeR[i-1])*0.1)/6.0) + sum_distance_right[i-1] )\n\n\n\n########################################################\n## Evolution de la difference de vitesse ) \n########################################################\n\nplt.xlabel('index of time')\nplt.ylabel('Difference de distance en m')\nplt.title('Evolution de la difference de vitesse ')\n\n\naxis=np.linspace(0, len(speed_diff)-1,len(speed_diff) ) #len(timeR))\nplt.plot(axis, speed_diff, label='Evolution de la difference de vitesse ')\n\nplt.legend()\nplt.savefig('Evolution_de_la_difference_de_vitesse.png')\nplt.clf()\n\n\n########################################################\n## Evolution des distances parcourus gauche et droite \n########################################################\n\nplt.xlabel('index of time')\nplt.ylabel('Distance gauche et droite parcouru en m')\nplt.title('Evolution des distances parcourus gauche et droite ')\n\nplt.plot(timeL, sum_distance_left, label='left distance')\nplt.plot(timeR, sum_distance_right, label='right distance')\n\n\nplt.legend()\nplt.savefig('Evolution_des_distances_parcourus_gauche_et_droite.png')\nplt.clf()\n\n\n########################################################\n## Evolution du flux du temps \n########################################################\n\nplt.xlabel('Time flux')\nplt.ylabel('Flux of time')\nplt.title('index')\n\nlength_of_plot=10\naxis=np.linspace(0,length_of_plot-1,length_of_plot) #len(timeR))\nplt.plot(axis,timeL[0:length_of_plot],label='left time')\nplt.plot(axis,timeR[0:length_of_plot],label = 'right time')\n#plt.plot(axis,axis, label = 'right time')\n\nplt.legend()\nplt.savefig('Evolution_of_time_flux.png')\nplt.clf()\n\n########################################################\n## Evolution de la vitesse \n########################################################\n\n\nplt.xlabel('Time in s')\nplt.ylabel('Speed in rad/s')\nplt.title('Evolution of speed')\n\nplt.plot(timeL, left_speeds,label='left speed')\nplt.plot(timeR, right_speeds, label = 'right 
speed')\n\nplt.legend()\nplt.savefig('Evolution_of_speed.png')\nplt.clf()\n\n\n########################################################\n## Difference of time \n########################################################\n\n\naxis_bis=np.linspace(0,len(diff_time_left)-1,len(diff_time_left)) #len(timeR))\n\n\nplt.xlabel('index')\nplt.ylabel('Difference of time s')\nplt.title('Evolution of Difference of time ')\n\nplt.plot(axis_bis, diff_time_left ,label='left difference of time')\nplt.plot(axis_bis, diff_time_right, label = 'right difference of time')\n\nplt.legend()\nplt.savefig('Evolution of Difference of time.png')\nplt.clf()\n\n########################################################\n## Difference of time between Left and right\n########################################################\n\nplt.xlabel('Time in s')\nplt.ylabel('Difference of time in s')\nplt.title('Difference of time')\n\nplt.plot(range(len(diff_time)), diff_time)\n\nplt.savefig('difference_of_time.png')\n\n\n\n\n",
"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfile = open('Motor_test.txt', 'r')\ncontent = file.read()\nfile.close()\na = content.split('\\n')\nleft_speeds = map(float, a[1].replace(' ', '').split(','))\ntimeL = map(float, a[3].replace(' ', '').split(','))\nright_speeds = map(float, a[5].replace(' ', '').split(','))\ntimeR = map(float, a[7].replace(' ', '').split(','))\ndiff_time_left = []\nfor i in range(1, len(timeL)):\n diff_time_left.append(timeL[i] - timeL[i - 1])\ndiff_time_right = []\nfor i in range(1, len(timeR)):\n diff_time_right.append(timeR[i] - timeR[i - 1])\ndiff_time = []\nfor i in range(max(len(timeL), len(timeR))):\n diff_time.append(timeL[i] - timeR[i])\nspeed_diff = []\nfor i in range(max(len(left_speeds), len(right_speeds))):\n speed_diff.append(left_speeds[i] - right_speeds[i])\nsum_distance_left = []\nsum_distance_right = []\nsum_distance_left.append(0)\nsum_distance_right.append(0)\nfor i in range(1, len(left_speeds)):\n sum_distance_left.append(left_speeds[i] * (timeL[i] - timeL[i - 1]) * \n 0.1 / 6.0 + sum_distance_left[i - 1])\n sum_distance_right.append(right_speeds[i] * (timeR[i] - timeR[i - 1]) *\n 0.1 / 6.0 + sum_distance_right[i - 1])\nplt.xlabel('index of time')\nplt.ylabel('Difference de distance en m')\nplt.title('Evolution de la difference de vitesse ')\naxis = np.linspace(0, len(speed_diff) - 1, len(speed_diff))\nplt.plot(axis, speed_diff, label='Evolution de la difference de vitesse ')\nplt.legend()\nplt.savefig('Evolution_de_la_difference_de_vitesse.png')\nplt.clf()\nplt.xlabel('index of time')\nplt.ylabel('Distance gauche et droite parcouru en m')\nplt.title('Evolution des distances parcourus gauche et droite ')\nplt.plot(timeL, sum_distance_left, label='left distance')\nplt.plot(timeR, sum_distance_right, label='right distance')\nplt.legend()\nplt.savefig('Evolution_des_distances_parcourus_gauche_et_droite.png')\nplt.clf()\nplt.xlabel('Time flux')\nplt.ylabel('Flux of time')\nplt.title('index')\nlength_of_plot = 10\naxis = np.linspace(0, length_of_plot - 1, length_of_plot)\nplt.plot(axis, timeL[0:length_of_plot], label='left time')\nplt.plot(axis, timeR[0:length_of_plot], label='right time')\nplt.legend()\nplt.savefig('Evolution_of_time_flux.png')\nplt.clf()\nplt.xlabel('Time in s')\nplt.ylabel('Speed in rad/s')\nplt.title('Evolution of speed')\nplt.plot(timeL, left_speeds, label='left speed')\nplt.plot(timeR, right_speeds, label='right speed')\nplt.legend()\nplt.savefig('Evolution_of_speed.png')\nplt.clf()\naxis_bis = np.linspace(0, len(diff_time_left) - 1, len(diff_time_left))\nplt.xlabel('index')\nplt.ylabel('Difference of time s')\nplt.title('Evolution of Difference of time ')\nplt.plot(axis_bis, diff_time_left, label='left difference of time')\nplt.plot(axis_bis, diff_time_right, label='right difference of time')\nplt.legend()\nplt.savefig('Evolution of Difference of time.png')\nplt.clf()\nplt.xlabel('Time in s')\nplt.ylabel('Difference of time in s')\nplt.title('Difference of time')\nplt.plot(range(len(diff_time)), diff_time)\nplt.savefig('difference_of_time.png')\n",
"<import token>\nfile = open('Motor_test.txt', 'r')\ncontent = file.read()\nfile.close()\na = content.split('\\n')\nleft_speeds = map(float, a[1].replace(' ', '').split(','))\ntimeL = map(float, a[3].replace(' ', '').split(','))\nright_speeds = map(float, a[5].replace(' ', '').split(','))\ntimeR = map(float, a[7].replace(' ', '').split(','))\ndiff_time_left = []\nfor i in range(1, len(timeL)):\n diff_time_left.append(timeL[i] - timeL[i - 1])\ndiff_time_right = []\nfor i in range(1, len(timeR)):\n diff_time_right.append(timeR[i] - timeR[i - 1])\ndiff_time = []\nfor i in range(max(len(timeL), len(timeR))):\n diff_time.append(timeL[i] - timeR[i])\nspeed_diff = []\nfor i in range(max(len(left_speeds), len(right_speeds))):\n speed_diff.append(left_speeds[i] - right_speeds[i])\nsum_distance_left = []\nsum_distance_right = []\nsum_distance_left.append(0)\nsum_distance_right.append(0)\nfor i in range(1, len(left_speeds)):\n sum_distance_left.append(left_speeds[i] * (timeL[i] - timeL[i - 1]) * \n 0.1 / 6.0 + sum_distance_left[i - 1])\n sum_distance_right.append(right_speeds[i] * (timeR[i] - timeR[i - 1]) *\n 0.1 / 6.0 + sum_distance_right[i - 1])\nplt.xlabel('index of time')\nplt.ylabel('Difference de distance en m')\nplt.title('Evolution de la difference de vitesse ')\naxis = np.linspace(0, len(speed_diff) - 1, len(speed_diff))\nplt.plot(axis, speed_diff, label='Evolution de la difference de vitesse ')\nplt.legend()\nplt.savefig('Evolution_de_la_difference_de_vitesse.png')\nplt.clf()\nplt.xlabel('index of time')\nplt.ylabel('Distance gauche et droite parcouru en m')\nplt.title('Evolution des distances parcourus gauche et droite ')\nplt.plot(timeL, sum_distance_left, label='left distance')\nplt.plot(timeR, sum_distance_right, label='right distance')\nplt.legend()\nplt.savefig('Evolution_des_distances_parcourus_gauche_et_droite.png')\nplt.clf()\nplt.xlabel('Time flux')\nplt.ylabel('Flux of time')\nplt.title('index')\nlength_of_plot = 10\naxis = np.linspace(0, length_of_plot - 1, length_of_plot)\nplt.plot(axis, timeL[0:length_of_plot], label='left time')\nplt.plot(axis, timeR[0:length_of_plot], label='right time')\nplt.legend()\nplt.savefig('Evolution_of_time_flux.png')\nplt.clf()\nplt.xlabel('Time in s')\nplt.ylabel('Speed in rad/s')\nplt.title('Evolution of speed')\nplt.plot(timeL, left_speeds, label='left speed')\nplt.plot(timeR, right_speeds, label='right speed')\nplt.legend()\nplt.savefig('Evolution_of_speed.png')\nplt.clf()\naxis_bis = np.linspace(0, len(diff_time_left) - 1, len(diff_time_left))\nplt.xlabel('index')\nplt.ylabel('Difference of time s')\nplt.title('Evolution of Difference of time ')\nplt.plot(axis_bis, diff_time_left, label='left difference of time')\nplt.plot(axis_bis, diff_time_right, label='right difference of time')\nplt.legend()\nplt.savefig('Evolution of Difference of time.png')\nplt.clf()\nplt.xlabel('Time in s')\nplt.ylabel('Difference of time in s')\nplt.title('Difference of time')\nplt.plot(range(len(diff_time)), diff_time)\nplt.savefig('difference_of_time.png')\n",
"<import token>\n<assignment token>\nfile.close()\n<assignment token>\nfor i in range(1, len(timeL)):\n diff_time_left.append(timeL[i] - timeL[i - 1])\n<assignment token>\nfor i in range(1, len(timeR)):\n diff_time_right.append(timeR[i] - timeR[i - 1])\n<assignment token>\nfor i in range(max(len(timeL), len(timeR))):\n diff_time.append(timeL[i] - timeR[i])\n<assignment token>\nfor i in range(max(len(left_speeds), len(right_speeds))):\n speed_diff.append(left_speeds[i] - right_speeds[i])\n<assignment token>\nsum_distance_left.append(0)\nsum_distance_right.append(0)\nfor i in range(1, len(left_speeds)):\n sum_distance_left.append(left_speeds[i] * (timeL[i] - timeL[i - 1]) * \n 0.1 / 6.0 + sum_distance_left[i - 1])\n sum_distance_right.append(right_speeds[i] * (timeR[i] - timeR[i - 1]) *\n 0.1 / 6.0 + sum_distance_right[i - 1])\nplt.xlabel('index of time')\nplt.ylabel('Difference de distance en m')\nplt.title('Evolution de la difference de vitesse ')\n<assignment token>\nplt.plot(axis, speed_diff, label='Evolution de la difference de vitesse ')\nplt.legend()\nplt.savefig('Evolution_de_la_difference_de_vitesse.png')\nplt.clf()\nplt.xlabel('index of time')\nplt.ylabel('Distance gauche et droite parcouru en m')\nplt.title('Evolution des distances parcourus gauche et droite ')\nplt.plot(timeL, sum_distance_left, label='left distance')\nplt.plot(timeR, sum_distance_right, label='right distance')\nplt.legend()\nplt.savefig('Evolution_des_distances_parcourus_gauche_et_droite.png')\nplt.clf()\nplt.xlabel('Time flux')\nplt.ylabel('Flux of time')\nplt.title('index')\n<assignment token>\nplt.plot(axis, timeL[0:length_of_plot], label='left time')\nplt.plot(axis, timeR[0:length_of_plot], label='right time')\nplt.legend()\nplt.savefig('Evolution_of_time_flux.png')\nplt.clf()\nplt.xlabel('Time in s')\nplt.ylabel('Speed in rad/s')\nplt.title('Evolution of speed')\nplt.plot(timeL, left_speeds, label='left speed')\nplt.plot(timeR, right_speeds, label='right speed')\nplt.legend()\nplt.savefig('Evolution_of_speed.png')\nplt.clf()\n<assignment token>\nplt.xlabel('index')\nplt.ylabel('Difference of time s')\nplt.title('Evolution of Difference of time ')\nplt.plot(axis_bis, diff_time_left, label='left difference of time')\nplt.plot(axis_bis, diff_time_right, label='right difference of time')\nplt.legend()\nplt.savefig('Evolution of Difference of time.png')\nplt.clf()\nplt.xlabel('Time in s')\nplt.ylabel('Difference of time in s')\nplt.title('Difference of time')\nplt.plot(range(len(diff_time)), diff_time)\nplt.savefig('difference_of_time.png')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,516 |
d62160ced36203d94ceafbb3bffffcfec0b75c0c
|
from click.testing import CliRunner
from hokusai.cli.base import base
from test import HokusaiSmokeTestCase
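# Click's CliRunner invokes the CLI in-process and captures the result (exit code and output)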
class TestReviewApp(HokusaiSmokeTestCase):
def test_review_app_with_underscore(self):
runner = CliRunner()
result = runner.invoke(base, ['review_app'])
assert result.exit_code == 0
def test_review_app_with_dash(self):
runner = CliRunner()
result = runner.invoke(base, ['review-app'])
assert result.exit_code != 0
|
[
"from click.testing import CliRunner\nfrom hokusai.cli.base import base\nfrom test import HokusaiSmokeTestCase\n\nclass TestReviewApp(HokusaiSmokeTestCase):\n def test_review_app_with_underscore(self):\n runner = CliRunner()\n result = runner.invoke(base, ['review_app'])\n assert result.exit_code == 0\n def test_review_app_with_dash(self):\n runner = CliRunner()\n result = runner.invoke(base, ['review-app'])\n assert result.exit_code != 0\n",
"from click.testing import CliRunner\nfrom hokusai.cli.base import base\nfrom test import HokusaiSmokeTestCase\n\n\nclass TestReviewApp(HokusaiSmokeTestCase):\n\n def test_review_app_with_underscore(self):\n runner = CliRunner()\n result = runner.invoke(base, ['review_app'])\n assert result.exit_code == 0\n\n def test_review_app_with_dash(self):\n runner = CliRunner()\n result = runner.invoke(base, ['review-app'])\n assert result.exit_code != 0\n",
"<import token>\n\n\nclass TestReviewApp(HokusaiSmokeTestCase):\n\n def test_review_app_with_underscore(self):\n runner = CliRunner()\n result = runner.invoke(base, ['review_app'])\n assert result.exit_code == 0\n\n def test_review_app_with_dash(self):\n runner = CliRunner()\n result = runner.invoke(base, ['review-app'])\n assert result.exit_code != 0\n",
"<import token>\n\n\nclass TestReviewApp(HokusaiSmokeTestCase):\n <function token>\n\n def test_review_app_with_dash(self):\n runner = CliRunner()\n result = runner.invoke(base, ['review-app'])\n assert result.exit_code != 0\n",
"<import token>\n\n\nclass TestReviewApp(HokusaiSmokeTestCase):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,517 |
6eb51fa17181bb5f28e8335c2bf4af47c7d4218c
|
## SaveFile.py
## Mustafa Haddara
## Tue Mar-18-2014
import hashlib
import struct
from BoardException import *
'''
File Format
Size | Description of content
0x04 bytes | "CK02", where CK is the "signature" for the file type and 02 is the save file version (for future revisions)
0x10 bytes | 16 bytes holding the raw board data as eight 2-byte unsigned shorts
0x01 byte  | A flags byte describing the game state, where the first five bits are reserved,
           | the 6th bit is 0 if the first player controls red, 1 if the first player controls black,
           | the 7th bit is 1 if the second player is human, 0 if AI, and the 8th bit is whose turn it is (0 = first player's turn)
0x01 byte | Contains turn number
0x01 byte | Contains the time remaining in the turn
0x20 bytes | An SHA256 hash of the previous content
'''
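# Illustrative sketch (not part of the original file): packing and unpacking the
# flags byte described above, assuming first_player=1, second_player=1, turn=0:
#   flags = (1 << 2) | (1 << 1) | 0           # -> 0b110
#   second_player = (flags & 0x2) >> 1        # 1: human opponent
#   first_player  = (flags & 0x4) >> 2        # 1: first player controls black
#   turn          = flags & 0x1               # 0: first player's turn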
#saveFile: Saves the current game state to a file
#array: The array which holds the contents of the board; second_player: Whether the player is playing against a human or AI;
#turn: Whose turn it currently is; turn_number: The current number of turns played.
def saveFile(array, first_player=0, second_player=0, turn=0, turn_number=1, time=60):
## array of board positions
## first_player = 0 if first player controls red
## = 1 if first player controls black
## second_player = 0 if playing against computer,
## = 1 if playing against human
## turn = 0 if first player's turn,
## = 1 if second player's turn
outFile = open("save.chck", "wb")
data = "CK02"
## Writing data from the array into the data string
for i in array:
		data += struct.pack('H', i)
data += struct.pack('B', (first_player << 2) | (second_player << 1) | turn)
data += struct.pack('B', turn_number)
data += struct.pack('B', time)
data += hash(data[:])
outFile.write(data)
outFile.close()
#loadFile: Loads save.chck and sets the game to where the player left off
#Returns: the board layout, the first player's colour flag, whether the second player is human, whose turn it is, the turn number, and the time remaining.
def loadFile():
try:
inFile = open("save.chck", "rb")
except:
raise BoardException("No save file found!")
data = inFile.read()
boardData = data[:-32]
hashCheck = data[-32:]
if hash(boardData) != hashCheck or data[0:2] != "CK":
raise BoardException("Oi! Stop mucking with my save file!")
if (data[2:4] != "01") and (data[2:4] != "02"):
if int(data[2:4]) > 2:
raise BoardException("Unrecognized save file version. Please download version " + data[2:4] + " in order to load this game correctly.")
else:
raise BoardException("Unrecognized save file version. Unable to find appropriate version.")
inFile.close()
playerTurn = int(struct.unpack('B', boardData[20])[0]) & 0x1
turn_number = 0
#Some conditions for backward compatibility
if data[2:4] == "01":
first_player = 1
time = 60
elif data[2:4] == "02":
first_player = int(struct.unpack('B', boardData[20])[0])
turn_number = int(struct.unpack('B', boardData[21])[0])
time = int(struct.unpack('B', boardData[22])[0])
second_player = (first_player & 0x2) >> 1
first_player = (first_player & 0x4) >> 2
	boardLayout = []
for i in range(4,19,2):
boardLayout.append(int(struct.unpack('H', boardData[i:i+2])[0]))
#Prevent "replays" of using older versions
if data[2:4] == "01":
saveFile(boardLayout, first_player, second_player, playerTurn, turn_number, time)
boardLayout, first_player, second_player, playerTurn, turn_number,time = loadFile()
return boardLayout, first_player, second_player, playerTurn, turn_number,time
#hash: Hashes a string with SHA-256
#Returns: The raw digest bytes rather than its hex string representation
def hash(string):
secure = hashlib.sha256()
secure.update(string)
return secure.digest()
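# Illustrative round trip (not part of the original file):
#   board = [0] * 8                                   # eight zeroed board entries
#   saveFile(board, first_player=0, second_player=1)  # save a vs-human game
#   layout, fp, sp, turn, n, t = loadFile()           # layout == board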
|
[
"## SaveFile.py\n## Mustafa Haddara\n## Tue Mar-18-2014\n\nimport hashlib\nimport struct\nfrom BoardException import *\n\n'''\nFile Format\nSize\t | Description of content\n0x04 bytes | CK02 , where CK is the \"signature\" for the file type and 02 is the version of save file (future revisions)\n0x10 bytes | that hold the raw data for the board.\n0x01 byte | A byte that holds whos turn was last played and whether it was vs player or vs AI where the first five bits are reserved\n\t\t | and the 6th is 0 if the first player controls red, 1 if the first player controls black\n\t\t | and the 7th is 1 if the second player is human, 0 if AI and the eighth bit is whos turn as that bit + 1\n0x01 byte | Contains turn number\n0x01 byte | Contains the time remaining in the turn\n0x20 bytes | An SHA256 hash of the previous content\n'''\n\n#saveFile: Saves the current game state to a file\n#array: The array which holds the contents of the board; second_player: Whether the player is playing against a human or AI;\n#turn: Who's turn it currently is; turn_number: The current number of turns played, currently unused.\ndef saveFile(array, first_player = 0, second_player = 0, turn = 0, turn_number = 1, time = 60):\n\t## array of board positions\n\t## first_player = 0 if first player controls red\n\t##\t\t\t\t = 1 if first player controls black\n\t## second_player = 0 if playing against computer, \n\t##\t\t\t\t = 1 if playing against human\n\t## turn = 0 if first player's turn, \n\t##\t\t= 1 if second player's turn\n\n\toutFile = open(\"save.chck\", \"wb\")\n\tdata = \"CK02\"\n\n\t## Writing data from the array into the data string\n\tfor i in array:\n\t\t#data += str(i) + \"i\"\n\t\tdata += struct.pack('H',i)\n\tdata += struct.pack('B', (first_player << 2) | (second_player << 1) | turn)\n\tdata += struct.pack('B', turn_number)\n\tdata += struct.pack('B', time)\n\tdata += hash(data[:]) \n\t\n\toutFile.write(data)\n\toutFile.close()\n\n#loadFile: Loads save.chck and sets the game to where the player left off\n#Returns, the board configuration, whether the second player is human or not, and who's turn it is.\ndef loadFile():\n\ttry:\n\t\tinFile = open(\"save.chck\", \"rb\")\n\texcept:\n\t\traise BoardException(\"No save file found!\")\n\tdata = inFile.read()\n\tboardData = data[:-32]\n\thashCheck = data[-32:]\n\tif hash(boardData) != hashCheck or data[0:2] != \"CK\":\n\t\traise BoardException(\"Oi! Stop mucking with my save file!\")\n\tif (data[2:4] != \"01\") and (data[2:4] != \"02\"):\n\t\tif int(data[2:4]) > 2: \n\t\t\traise BoardException(\"Unrecognized save file version. Please download version \" + data[2:4] + \" in order to load this game correctly.\")\n\t\telse:\n\t\t\traise BoardException(\"Unrecognized save file version. 
Unable to find appropriate version.\")\n\tinFile.close()\n\tplayerTurn = int(struct.unpack('B', boardData[20])[0]) & 0x1\n\tturn_number = 0\n\t#Some conditions for backward compatibility\n\tif data[2:4] == \"01\":\n\t\tfirst_player = 1\n\t\ttime = 60\n\telif data[2:4] == \"02\":\n\t\tfirst_player = int(struct.unpack('B', boardData[20])[0]) \n\t\tturn_number = int(struct.unpack('B', boardData[21])[0])\n\t\ttime = int(struct.unpack('B', boardData[22])[0])\n\tsecond_player = (first_player & 0x2) >> 1\n\tfirst_player = (first_player & 0x4) >> 2\n\tboardLayout = [];\n\tfor i in range(4,19,2):\n\t\tboardLayout.append(int(struct.unpack('H', boardData[i:i+2])[0]))\n\t#Prevent \"replays\" of using older versions\n\tif data[2:4] == \"01\":\n\t\tsaveFile(boardLayout, first_player, second_player, playerTurn, turn_number, time)\n\t\tboardLayout, first_player, second_player, playerTurn, turn_number,time = loadFile()\n\treturn boardLayout, first_player, second_player, playerTurn, turn_number,time\n\n#hash: Hashes a string\n#Returns: The hash as a sequence of bits rather than it's string representation\ndef hash(string):\n\tsecure = hashlib.sha256()\n\tsecure.update(string)\n\treturn secure.digest()\n",
"import hashlib\nimport struct\nfrom BoardException import *\n<docstring token>\n\n\ndef saveFile(array, first_player=0, second_player=0, turn=0, turn_number=1,\n time=60):\n outFile = open('save.chck', 'wb')\n data = 'CK02'\n for i in array:\n data += struct.pack('H', i)\n data += struct.pack('B', first_player << 2 | second_player << 1 | turn)\n data += struct.pack('B', turn_number)\n data += struct.pack('B', time)\n data += hash(data[:])\n outFile.write(data)\n outFile.close()\n\n\ndef loadFile():\n try:\n inFile = open('save.chck', 'rb')\n except:\n raise BoardException('No save file found!')\n data = inFile.read()\n boardData = data[:-32]\n hashCheck = data[-32:]\n if hash(boardData) != hashCheck or data[0:2] != 'CK':\n raise BoardException('Oi! Stop mucking with my save file!')\n if data[2:4] != '01' and data[2:4] != '02':\n if int(data[2:4]) > 2:\n raise BoardException(\n 'Unrecognized save file version. Please download version ' +\n data[2:4] + ' in order to load this game correctly.')\n else:\n raise BoardException(\n 'Unrecognized save file version. Unable to find appropriate version.'\n )\n inFile.close()\n playerTurn = int(struct.unpack('B', boardData[20])[0]) & 1\n turn_number = 0\n if data[2:4] == '01':\n first_player = 1\n time = 60\n elif data[2:4] == '02':\n first_player = int(struct.unpack('B', boardData[20])[0])\n turn_number = int(struct.unpack('B', boardData[21])[0])\n time = int(struct.unpack('B', boardData[22])[0])\n second_player = (first_player & 2) >> 1\n first_player = (first_player & 4) >> 2\n boardLayout = []\n for i in range(4, 19, 2):\n boardLayout.append(int(struct.unpack('H', boardData[i:i + 2])[0]))\n if data[2:4] == '01':\n saveFile(boardLayout, first_player, second_player, playerTurn,\n turn_number, time)\n (boardLayout, first_player, second_player, playerTurn, turn_number,\n time) = loadFile()\n return (boardLayout, first_player, second_player, playerTurn,\n turn_number, time)\n\n\ndef hash(string):\n secure = hashlib.sha256()\n secure.update(string)\n return secure.digest()\n",
"<import token>\n<docstring token>\n\n\ndef saveFile(array, first_player=0, second_player=0, turn=0, turn_number=1,\n time=60):\n outFile = open('save.chck', 'wb')\n data = 'CK02'\n for i in array:\n data += struct.pack('H', i)\n data += struct.pack('B', first_player << 2 | second_player << 1 | turn)\n data += struct.pack('B', turn_number)\n data += struct.pack('B', time)\n data += hash(data[:])\n outFile.write(data)\n outFile.close()\n\n\ndef loadFile():\n try:\n inFile = open('save.chck', 'rb')\n except:\n raise BoardException('No save file found!')\n data = inFile.read()\n boardData = data[:-32]\n hashCheck = data[-32:]\n if hash(boardData) != hashCheck or data[0:2] != 'CK':\n raise BoardException('Oi! Stop mucking with my save file!')\n if data[2:4] != '01' and data[2:4] != '02':\n if int(data[2:4]) > 2:\n raise BoardException(\n 'Unrecognized save file version. Please download version ' +\n data[2:4] + ' in order to load this game correctly.')\n else:\n raise BoardException(\n 'Unrecognized save file version. Unable to find appropriate version.'\n )\n inFile.close()\n playerTurn = int(struct.unpack('B', boardData[20])[0]) & 1\n turn_number = 0\n if data[2:4] == '01':\n first_player = 1\n time = 60\n elif data[2:4] == '02':\n first_player = int(struct.unpack('B', boardData[20])[0])\n turn_number = int(struct.unpack('B', boardData[21])[0])\n time = int(struct.unpack('B', boardData[22])[0])\n second_player = (first_player & 2) >> 1\n first_player = (first_player & 4) >> 2\n boardLayout = []\n for i in range(4, 19, 2):\n boardLayout.append(int(struct.unpack('H', boardData[i:i + 2])[0]))\n if data[2:4] == '01':\n saveFile(boardLayout, first_player, second_player, playerTurn,\n turn_number, time)\n (boardLayout, first_player, second_player, playerTurn, turn_number,\n time) = loadFile()\n return (boardLayout, first_player, second_player, playerTurn,\n turn_number, time)\n\n\ndef hash(string):\n secure = hashlib.sha256()\n secure.update(string)\n return secure.digest()\n",
"<import token>\n<docstring token>\n\n\ndef saveFile(array, first_player=0, second_player=0, turn=0, turn_number=1,\n time=60):\n outFile = open('save.chck', 'wb')\n data = 'CK02'\n for i in array:\n data += struct.pack('H', i)\n data += struct.pack('B', first_player << 2 | second_player << 1 | turn)\n data += struct.pack('B', turn_number)\n data += struct.pack('B', time)\n data += hash(data[:])\n outFile.write(data)\n outFile.close()\n\n\ndef loadFile():\n try:\n inFile = open('save.chck', 'rb')\n except:\n raise BoardException('No save file found!')\n data = inFile.read()\n boardData = data[:-32]\n hashCheck = data[-32:]\n if hash(boardData) != hashCheck or data[0:2] != 'CK':\n raise BoardException('Oi! Stop mucking with my save file!')\n if data[2:4] != '01' and data[2:4] != '02':\n if int(data[2:4]) > 2:\n raise BoardException(\n 'Unrecognized save file version. Please download version ' +\n data[2:4] + ' in order to load this game correctly.')\n else:\n raise BoardException(\n 'Unrecognized save file version. Unable to find appropriate version.'\n )\n inFile.close()\n playerTurn = int(struct.unpack('B', boardData[20])[0]) & 1\n turn_number = 0\n if data[2:4] == '01':\n first_player = 1\n time = 60\n elif data[2:4] == '02':\n first_player = int(struct.unpack('B', boardData[20])[0])\n turn_number = int(struct.unpack('B', boardData[21])[0])\n time = int(struct.unpack('B', boardData[22])[0])\n second_player = (first_player & 2) >> 1\n first_player = (first_player & 4) >> 2\n boardLayout = []\n for i in range(4, 19, 2):\n boardLayout.append(int(struct.unpack('H', boardData[i:i + 2])[0]))\n if data[2:4] == '01':\n saveFile(boardLayout, first_player, second_player, playerTurn,\n turn_number, time)\n (boardLayout, first_player, second_player, playerTurn, turn_number,\n time) = loadFile()\n return (boardLayout, first_player, second_player, playerTurn,\n turn_number, time)\n\n\n<function token>\n",
"<import token>\n<docstring token>\n\n\ndef saveFile(array, first_player=0, second_player=0, turn=0, turn_number=1,\n time=60):\n outFile = open('save.chck', 'wb')\n data = 'CK02'\n for i in array:\n data += struct.pack('H', i)\n data += struct.pack('B', first_player << 2 | second_player << 1 | turn)\n data += struct.pack('B', turn_number)\n data += struct.pack('B', time)\n data += hash(data[:])\n outFile.write(data)\n outFile.close()\n\n\n<function token>\n<function token>\n",
"<import token>\n<docstring token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,518 |
1a18b81669dc0572c2d333964a9f82ab3d0be3ee
|
"""Tests for the BSBLan integration."""
import aiohttp
from homeassistant.components.bsblan.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant
from tests.components.bsblan import init_integration, init_integration_without_auth
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_config_entry_not_ready(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the BSBLan configuration entry not ready."""
aioclient_mock.post(
"http://example.local:80/1234/JQ?Parameter=6224,6225,6226",
exc=aiohttp.ClientError,
)
entry = await init_integration(hass, aioclient_mock)
assert entry.state is ConfigEntryState.SETUP_RETRY
async def test_unload_config_entry(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the BSBLan configuration entry unloading."""
entry = await init_integration(hass, aioclient_mock)
assert hass.data[DOMAIN]
await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert not hass.data.get(DOMAIN)
async def test_config_entry_no_authentication(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the BSBLan configuration entry not ready."""
aioclient_mock.post(
"http://example.local:80/1234/JQ?Parameter=6224,6225,6226",
exc=aiohttp.ClientError,
)
entry = await init_integration_without_auth(hass, aioclient_mock)
assert entry.state is ConfigEntryState.SETUP_RETRY
|
[
"\"\"\"Tests for the BSBLan integration.\"\"\"\nimport aiohttp\n\nfrom homeassistant.components.bsblan.const import DOMAIN\nfrom homeassistant.config_entries import ConfigEntryState\nfrom homeassistant.core import HomeAssistant\n\nfrom tests.components.bsblan import init_integration, init_integration_without_auth\nfrom tests.test_util.aiohttp import AiohttpClientMocker\n\n\nasync def test_config_entry_not_ready(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n \"\"\"Test the BSBLan configuration entry not ready.\"\"\"\n aioclient_mock.post(\n \"http://example.local:80/1234/JQ?Parameter=6224,6225,6226\",\n exc=aiohttp.ClientError,\n )\n\n entry = await init_integration(hass, aioclient_mock)\n assert entry.state is ConfigEntryState.SETUP_RETRY\n\n\nasync def test_unload_config_entry(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n \"\"\"Test the BSBLan configuration entry unloading.\"\"\"\n entry = await init_integration(hass, aioclient_mock)\n assert hass.data[DOMAIN]\n\n await hass.config_entries.async_unload(entry.entry_id)\n await hass.async_block_till_done()\n assert not hass.data.get(DOMAIN)\n\n\nasync def test_config_entry_no_authentication(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n \"\"\"Test the BSBLan configuration entry not ready.\"\"\"\n aioclient_mock.post(\n \"http://example.local:80/1234/JQ?Parameter=6224,6225,6226\",\n exc=aiohttp.ClientError,\n )\n\n entry = await init_integration_without_auth(hass, aioclient_mock)\n assert entry.state is ConfigEntryState.SETUP_RETRY\n",
"<docstring token>\nimport aiohttp\nfrom homeassistant.components.bsblan.const import DOMAIN\nfrom homeassistant.config_entries import ConfigEntryState\nfrom homeassistant.core import HomeAssistant\nfrom tests.components.bsblan import init_integration, init_integration_without_auth\nfrom tests.test_util.aiohttp import AiohttpClientMocker\n\n\nasync def test_config_entry_not_ready(hass: HomeAssistant, aioclient_mock:\n AiohttpClientMocker) ->None:\n \"\"\"Test the BSBLan configuration entry not ready.\"\"\"\n aioclient_mock.post(\n 'http://example.local:80/1234/JQ?Parameter=6224,6225,6226', exc=\n aiohttp.ClientError)\n entry = await init_integration(hass, aioclient_mock)\n assert entry.state is ConfigEntryState.SETUP_RETRY\n\n\nasync def test_unload_config_entry(hass: HomeAssistant, aioclient_mock:\n AiohttpClientMocker) ->None:\n \"\"\"Test the BSBLan configuration entry unloading.\"\"\"\n entry = await init_integration(hass, aioclient_mock)\n assert hass.data[DOMAIN]\n await hass.config_entries.async_unload(entry.entry_id)\n await hass.async_block_till_done()\n assert not hass.data.get(DOMAIN)\n\n\nasync def test_config_entry_no_authentication(hass: HomeAssistant,\n aioclient_mock: AiohttpClientMocker) ->None:\n \"\"\"Test the BSBLan configuration entry not ready.\"\"\"\n aioclient_mock.post(\n 'http://example.local:80/1234/JQ?Parameter=6224,6225,6226', exc=\n aiohttp.ClientError)\n entry = await init_integration_without_auth(hass, aioclient_mock)\n assert entry.state is ConfigEntryState.SETUP_RETRY\n",
"<docstring token>\n<import token>\n\n\nasync def test_config_entry_not_ready(hass: HomeAssistant, aioclient_mock:\n AiohttpClientMocker) ->None:\n \"\"\"Test the BSBLan configuration entry not ready.\"\"\"\n aioclient_mock.post(\n 'http://example.local:80/1234/JQ?Parameter=6224,6225,6226', exc=\n aiohttp.ClientError)\n entry = await init_integration(hass, aioclient_mock)\n assert entry.state is ConfigEntryState.SETUP_RETRY\n\n\nasync def test_unload_config_entry(hass: HomeAssistant, aioclient_mock:\n AiohttpClientMocker) ->None:\n \"\"\"Test the BSBLan configuration entry unloading.\"\"\"\n entry = await init_integration(hass, aioclient_mock)\n assert hass.data[DOMAIN]\n await hass.config_entries.async_unload(entry.entry_id)\n await hass.async_block_till_done()\n assert not hass.data.get(DOMAIN)\n\n\nasync def test_config_entry_no_authentication(hass: HomeAssistant,\n aioclient_mock: AiohttpClientMocker) ->None:\n \"\"\"Test the BSBLan configuration entry not ready.\"\"\"\n aioclient_mock.post(\n 'http://example.local:80/1234/JQ?Parameter=6224,6225,6226', exc=\n aiohttp.ClientError)\n entry = await init_integration_without_auth(hass, aioclient_mock)\n assert entry.state is ConfigEntryState.SETUP_RETRY\n",
"<docstring token>\n<import token>\n<code token>\n"
] | false |
99,519 |
953d250081ad1cdb7855b0395b8479ef02f1b0ee
|
__author__ = 'Irwan Fathurrahman <[email protected]>'
__date__ = '15/06/20'
import datetime
import json
import os
import requests
import time
from django.conf import settings
from django.test import TestCase
from django.utils.dateparse import parse_datetime
class TestAnalyzeImpact(TestCase):
def setUp(self):
self.forecast_date_range_start = "1990-01-01T17:00:00.000Z"
self.forecast_date_range_end = "1990-01-30T17:00:00.000Z"
        # block until the hazard event queue is empty, polling every 10 seconds
url = os.path.join(
settings.POSTGREST_BASE_URL,
'hazard_event_queue')
is_empty = False
while not is_empty:
response = requests.get(url)
is_empty = len(response.json()) == 0
if not is_empty:
time.sleep(10)
def test_peformance(self):
""" Test impact calculation peformance
and also return time of progress
"""
timedeltas = []
for file in os.listdir(settings.ANALYSIS_REPORT_FOLDER):
_file = open(os.path.join(settings.ANALYSIS_REPORT_FOLDER, file), "r")
report = json.loads(_file.read())
timedeltas.append(
parse_datetime(report['finish']) - parse_datetime(report['start']))
_file.close()
# number of queue
print('NUMBER OF QUEUE = {}'.format(len(timedeltas)))
# get average time
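        # sum() needs timedelta(0) as its start value: the default start of 0 (an int) cannot be added to a timedelta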
average_timedelta = sum(timedeltas, datetime.timedelta(0)) / len(timedeltas)
print('AVERAGE = {}'.format(average_timedelta))
self.assertTrue(average_timedelta < datetime.timedelta(minutes=3))
# get total process time
total = timedeltas[0]
        for delta in timedeltas[1:]:  # accumulate the remaining runs into the total
total += delta
print('TOTAL = {}'.format(total))
self.assertTrue(total < datetime.timedelta(minutes=3 * len(timedeltas)))
def test_event_historical(self):
""" Test for return list of historical event
"""
url = os.path.join(
settings.POSTGREST_BASE_URL,
'rpc/flood_event_historical_forecast_list_f')
events = requests.post(url, data={
'forecast_date_range_start': self.forecast_date_range_start,
'forecast_date_range_end': self.forecast_date_range_end
}).json()
self.assertTrue(len(events) != 0)
def test_hazard_event(self):
""" Test of hazard event in date of tests
"""
url = os.path.join(
settings.POSTGREST_BASE_URL,
'hazard_event?and=(forecast_date.gte.{},forecast_date.lt.{})'
'&order=acquisition_date.desc'.format(
self.forecast_date_range_start, self.forecast_date_range_end
))
events = requests.get(url).json()
# district summary fixture
district_summary_fixture = os.path.join(
settings.FIXTURES, 'test', 'district_summary.json')
with open(district_summary_fixture, 'r') as _file:
district_summary_fixture = json.load(_file)
# road district summary fixture
road_district_summary_fixture = os.path.join(
settings.FIXTURES, 'test', 'road_district_summary.json')
with open(road_district_summary_fixture, 'r') as _file:
road_district_summary_fixture = json.load(_file)
# world pop district summary fixture
world_pop_district_summary = os.path.join(
settings.FIXTURES, 'test', 'world_pop_district_summary.json')
with open(world_pop_district_summary, 'r') as _file:
world_pop_district_summary = json.load(_file)
for event in events:
# check district summary
url = os.path.join(
settings.POSTGREST_BASE_URL,
'mv_flood_event_district_summary?flood_event_id=eq.{}'.format(
event['id']))
for summary in requests.get(url).json():
fixture = district_summary_fixture[summary['name']]
for key, value in summary.items():
if key == 'flood_event_id':
                        continue  # the id field is event-specific, so skip comparing it
self.assertEqual(value, fixture[key])
# check road district summary
url = os.path.join(
settings.POSTGREST_BASE_URL,
'mv_flood_event_road_district_summary?flood_event_id=eq.{}'.format(
event['id']))
for summary in requests.get(url).json():
fixture = road_district_summary_fixture[summary['name']]
for key, value in summary.items():
if key == 'flood_event_id':
                        continue  # the id field is event-specific, so skip comparing it
self.assertEqual(value, fixture[key])
# check world pop district summary
url = os.path.join(
settings.POSTGREST_BASE_URL,
'mv_flood_event_world_pop_district_summary?flood_event_id=eq.{}'.format(
event['id']))
for summary in requests.get(url).json():
fixture = world_pop_district_summary[summary['name']]
for key, value in summary.items():
if key == 'flood_event_id':
                        continue  # the id field is event-specific, so skip comparing it
self.assertEqual(value, fixture[key])
|
[
"__author__ = 'Irwan Fathurrahman <[email protected]>'\n__date__ = '15/06/20'\n\nimport datetime\nimport json\nimport os\nimport requests\nimport time\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom django.utils.dateparse import parse_datetime\n\n\nclass TestAnalyzeImpact(TestCase):\n\n def setUp(self):\n self.forecast_date_range_start = \"1990-01-01T17:00:00.000Z\"\n self.forecast_date_range_end = \"1990-01-30T17:00:00.000Z\"\n # we check if our queue is empty\n url = os.path.join(\n settings.POSTGREST_BASE_URL,\n 'hazard_event_queue')\n is_empty = False\n while not is_empty:\n response = requests.get(url)\n is_empty = len(response.json()) == 0\n if not is_empty:\n time.sleep(10)\n\n def test_peformance(self):\n \"\"\" Test impact calculation peformance\n and also return time of progress\n \"\"\"\n timedeltas = []\n for file in os.listdir(settings.ANALYSIS_REPORT_FOLDER):\n _file = open(os.path.join(settings.ANALYSIS_REPORT_FOLDER, file), \"r\")\n report = json.loads(_file.read())\n timedeltas.append(\n parse_datetime(report['finish']) - parse_datetime(report['start']))\n _file.close()\n\n # number of queue\n print('NUMBER OF QUEUE = {}'.format(len(timedeltas)))\n\n # get average time\n average_timedelta = sum(timedeltas, datetime.timedelta(0)) / len(timedeltas)\n print('AVERAGE = {}'.format(average_timedelta))\n self.assertTrue(average_timedelta < datetime.timedelta(minutes=3))\n\n # get total process time\n total = timedeltas[0]\n for delta in timedeltas[:1]:\n total += delta\n print('TOTAL = {}'.format(total))\n self.assertTrue(total < datetime.timedelta(minutes=3 * len(timedeltas)))\n\n def test_event_historical(self):\n \"\"\" Test for return list of historical event\n \"\"\"\n url = os.path.join(\n settings.POSTGREST_BASE_URL,\n 'rpc/flood_event_historical_forecast_list_f')\n events = requests.post(url, data={\n 'forecast_date_range_start': self.forecast_date_range_start,\n 'forecast_date_range_end': self.forecast_date_range_end\n }).json()\n self.assertTrue(len(events) != 0)\n\n def test_hazard_event(self):\n \"\"\" Test of hazard event in date of tests\n \"\"\"\n\n url = os.path.join(\n settings.POSTGREST_BASE_URL,\n 'hazard_event?and=(forecast_date.gte.{},forecast_date.lt.{})'\n '&order=acquisition_date.desc'.format(\n self.forecast_date_range_start, self.forecast_date_range_end\n ))\n events = requests.get(url).json()\n\n # district summary fixture\n district_summary_fixture = os.path.join(\n settings.FIXTURES, 'test', 'district_summary.json')\n with open(district_summary_fixture, 'r') as _file:\n district_summary_fixture = json.load(_file)\n\n # road district summary fixture\n road_district_summary_fixture = os.path.join(\n settings.FIXTURES, 'test', 'road_district_summary.json')\n with open(road_district_summary_fixture, 'r') as _file:\n road_district_summary_fixture = json.load(_file)\n\n # world pop district summary fixture\n world_pop_district_summary = os.path.join(\n settings.FIXTURES, 'test', 'world_pop_district_summary.json')\n with open(world_pop_district_summary, 'r') as _file:\n world_pop_district_summary = json.load(_file)\n\n for event in events:\n # check district summary\n url = os.path.join(\n settings.POSTGREST_BASE_URL,\n 'mv_flood_event_district_summary?flood_event_id=eq.{}'.format(\n event['id']))\n for summary in requests.get(url).json():\n fixture = district_summary_fixture[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n\n # check road district 
summary\n url = os.path.join(\n settings.POSTGREST_BASE_URL,\n 'mv_flood_event_road_district_summary?flood_event_id=eq.{}'.format(\n event['id']))\n for summary in requests.get(url).json():\n fixture = road_district_summary_fixture[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n\n # check world pop district summary\n url = os.path.join(\n settings.POSTGREST_BASE_URL,\n 'mv_flood_event_world_pop_district_summary?flood_event_id=eq.{}'.format(\n event['id']))\n for summary in requests.get(url).json():\n fixture = world_pop_district_summary[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n",
"__author__ = 'Irwan Fathurrahman <[email protected]>'\n__date__ = '15/06/20'\nimport datetime\nimport json\nimport os\nimport requests\nimport time\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom django.utils.dateparse import parse_datetime\n\n\nclass TestAnalyzeImpact(TestCase):\n\n def setUp(self):\n self.forecast_date_range_start = '1990-01-01T17:00:00.000Z'\n self.forecast_date_range_end = '1990-01-30T17:00:00.000Z'\n url = os.path.join(settings.POSTGREST_BASE_URL, 'hazard_event_queue')\n is_empty = False\n while not is_empty:\n response = requests.get(url)\n is_empty = len(response.json()) == 0\n if not is_empty:\n time.sleep(10)\n\n def test_peformance(self):\n \"\"\" Test impact calculation peformance\n and also return time of progress\n \"\"\"\n timedeltas = []\n for file in os.listdir(settings.ANALYSIS_REPORT_FOLDER):\n _file = open(os.path.join(settings.ANALYSIS_REPORT_FOLDER, file\n ), 'r')\n report = json.loads(_file.read())\n timedeltas.append(parse_datetime(report['finish']) -\n parse_datetime(report['start']))\n _file.close()\n print('NUMBER OF QUEUE = {}'.format(len(timedeltas)))\n average_timedelta = sum(timedeltas, datetime.timedelta(0)) / len(\n timedeltas)\n print('AVERAGE = {}'.format(average_timedelta))\n self.assertTrue(average_timedelta < datetime.timedelta(minutes=3))\n total = timedeltas[0]\n for delta in timedeltas[:1]:\n total += delta\n print('TOTAL = {}'.format(total))\n self.assertTrue(total < datetime.timedelta(minutes=3 * len(timedeltas))\n )\n\n def test_event_historical(self):\n \"\"\" Test for return list of historical event\n \"\"\"\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'rpc/flood_event_historical_forecast_list_f')\n events = requests.post(url, data={'forecast_date_range_start': self\n .forecast_date_range_start, 'forecast_date_range_end': self.\n forecast_date_range_end}).json()\n self.assertTrue(len(events) != 0)\n\n def test_hazard_event(self):\n \"\"\" Test of hazard event in date of tests\n \"\"\"\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'hazard_event?and=(forecast_date.gte.{},forecast_date.lt.{})&order=acquisition_date.desc'\n .format(self.forecast_date_range_start, self.\n forecast_date_range_end))\n events = requests.get(url).json()\n district_summary_fixture = os.path.join(settings.FIXTURES, 'test',\n 'district_summary.json')\n with open(district_summary_fixture, 'r') as _file:\n district_summary_fixture = json.load(_file)\n road_district_summary_fixture = os.path.join(settings.FIXTURES,\n 'test', 'road_district_summary.json')\n with open(road_district_summary_fixture, 'r') as _file:\n road_district_summary_fixture = json.load(_file)\n world_pop_district_summary = os.path.join(settings.FIXTURES, 'test',\n 'world_pop_district_summary.json')\n with open(world_pop_district_summary, 'r') as _file:\n world_pop_district_summary = json.load(_file)\n for event in events:\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_district_summary?flood_event_id=eq.{}'.\n format(event['id']))\n for summary in requests.get(url).json():\n fixture = district_summary_fixture[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_road_district_summary?flood_event_id=eq.{}'\n .format(event['id']))\n for summary in requests.get(url).json():\n fixture = road_district_summary_fixture[summary['name']]\n for key, value in summary.items():\n if key == 
'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_world_pop_district_summary?flood_event_id=eq.{}'\n .format(event['id']))\n for summary in requests.get(url).json():\n fixture = world_pop_district_summary[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n",
"__author__ = 'Irwan Fathurrahman <[email protected]>'\n__date__ = '15/06/20'\n<import token>\n\n\nclass TestAnalyzeImpact(TestCase):\n\n def setUp(self):\n self.forecast_date_range_start = '1990-01-01T17:00:00.000Z'\n self.forecast_date_range_end = '1990-01-30T17:00:00.000Z'\n url = os.path.join(settings.POSTGREST_BASE_URL, 'hazard_event_queue')\n is_empty = False\n while not is_empty:\n response = requests.get(url)\n is_empty = len(response.json()) == 0\n if not is_empty:\n time.sleep(10)\n\n def test_peformance(self):\n \"\"\" Test impact calculation peformance\n and also return time of progress\n \"\"\"\n timedeltas = []\n for file in os.listdir(settings.ANALYSIS_REPORT_FOLDER):\n _file = open(os.path.join(settings.ANALYSIS_REPORT_FOLDER, file\n ), 'r')\n report = json.loads(_file.read())\n timedeltas.append(parse_datetime(report['finish']) -\n parse_datetime(report['start']))\n _file.close()\n print('NUMBER OF QUEUE = {}'.format(len(timedeltas)))\n average_timedelta = sum(timedeltas, datetime.timedelta(0)) / len(\n timedeltas)\n print('AVERAGE = {}'.format(average_timedelta))\n self.assertTrue(average_timedelta < datetime.timedelta(minutes=3))\n total = timedeltas[0]\n for delta in timedeltas[:1]:\n total += delta\n print('TOTAL = {}'.format(total))\n self.assertTrue(total < datetime.timedelta(minutes=3 * len(timedeltas))\n )\n\n def test_event_historical(self):\n \"\"\" Test for return list of historical event\n \"\"\"\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'rpc/flood_event_historical_forecast_list_f')\n events = requests.post(url, data={'forecast_date_range_start': self\n .forecast_date_range_start, 'forecast_date_range_end': self.\n forecast_date_range_end}).json()\n self.assertTrue(len(events) != 0)\n\n def test_hazard_event(self):\n \"\"\" Test of hazard event in date of tests\n \"\"\"\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'hazard_event?and=(forecast_date.gte.{},forecast_date.lt.{})&order=acquisition_date.desc'\n .format(self.forecast_date_range_start, self.\n forecast_date_range_end))\n events = requests.get(url).json()\n district_summary_fixture = os.path.join(settings.FIXTURES, 'test',\n 'district_summary.json')\n with open(district_summary_fixture, 'r') as _file:\n district_summary_fixture = json.load(_file)\n road_district_summary_fixture = os.path.join(settings.FIXTURES,\n 'test', 'road_district_summary.json')\n with open(road_district_summary_fixture, 'r') as _file:\n road_district_summary_fixture = json.load(_file)\n world_pop_district_summary = os.path.join(settings.FIXTURES, 'test',\n 'world_pop_district_summary.json')\n with open(world_pop_district_summary, 'r') as _file:\n world_pop_district_summary = json.load(_file)\n for event in events:\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_district_summary?flood_event_id=eq.{}'.\n format(event['id']))\n for summary in requests.get(url).json():\n fixture = district_summary_fixture[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_road_district_summary?flood_event_id=eq.{}'\n .format(event['id']))\n for summary in requests.get(url).json():\n fixture = road_district_summary_fixture[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_world_pop_district_summary?flood_event_id=eq.{}'\n 
.format(event['id']))\n for summary in requests.get(url).json():\n fixture = world_pop_district_summary[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n",
"<assignment token>\n<import token>\n\n\nclass TestAnalyzeImpact(TestCase):\n\n def setUp(self):\n self.forecast_date_range_start = '1990-01-01T17:00:00.000Z'\n self.forecast_date_range_end = '1990-01-30T17:00:00.000Z'\n url = os.path.join(settings.POSTGREST_BASE_URL, 'hazard_event_queue')\n is_empty = False\n while not is_empty:\n response = requests.get(url)\n is_empty = len(response.json()) == 0\n if not is_empty:\n time.sleep(10)\n\n def test_peformance(self):\n \"\"\" Test impact calculation peformance\n and also return time of progress\n \"\"\"\n timedeltas = []\n for file in os.listdir(settings.ANALYSIS_REPORT_FOLDER):\n _file = open(os.path.join(settings.ANALYSIS_REPORT_FOLDER, file\n ), 'r')\n report = json.loads(_file.read())\n timedeltas.append(parse_datetime(report['finish']) -\n parse_datetime(report['start']))\n _file.close()\n print('NUMBER OF QUEUE = {}'.format(len(timedeltas)))\n average_timedelta = sum(timedeltas, datetime.timedelta(0)) / len(\n timedeltas)\n print('AVERAGE = {}'.format(average_timedelta))\n self.assertTrue(average_timedelta < datetime.timedelta(minutes=3))\n total = timedeltas[0]\n for delta in timedeltas[:1]:\n total += delta\n print('TOTAL = {}'.format(total))\n self.assertTrue(total < datetime.timedelta(minutes=3 * len(timedeltas))\n )\n\n def test_event_historical(self):\n \"\"\" Test for return list of historical event\n \"\"\"\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'rpc/flood_event_historical_forecast_list_f')\n events = requests.post(url, data={'forecast_date_range_start': self\n .forecast_date_range_start, 'forecast_date_range_end': self.\n forecast_date_range_end}).json()\n self.assertTrue(len(events) != 0)\n\n def test_hazard_event(self):\n \"\"\" Test of hazard event in date of tests\n \"\"\"\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'hazard_event?and=(forecast_date.gte.{},forecast_date.lt.{})&order=acquisition_date.desc'\n .format(self.forecast_date_range_start, self.\n forecast_date_range_end))\n events = requests.get(url).json()\n district_summary_fixture = os.path.join(settings.FIXTURES, 'test',\n 'district_summary.json')\n with open(district_summary_fixture, 'r') as _file:\n district_summary_fixture = json.load(_file)\n road_district_summary_fixture = os.path.join(settings.FIXTURES,\n 'test', 'road_district_summary.json')\n with open(road_district_summary_fixture, 'r') as _file:\n road_district_summary_fixture = json.load(_file)\n world_pop_district_summary = os.path.join(settings.FIXTURES, 'test',\n 'world_pop_district_summary.json')\n with open(world_pop_district_summary, 'r') as _file:\n world_pop_district_summary = json.load(_file)\n for event in events:\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_district_summary?flood_event_id=eq.{}'.\n format(event['id']))\n for summary in requests.get(url).json():\n fixture = district_summary_fixture[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_road_district_summary?flood_event_id=eq.{}'\n .format(event['id']))\n for summary in requests.get(url).json():\n fixture = road_district_summary_fixture[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_world_pop_district_summary?flood_event_id=eq.{}'\n .format(event['id']))\n for summary in 
requests.get(url).json():\n fixture = world_pop_district_summary[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n",
"<assignment token>\n<import token>\n\n\nclass TestAnalyzeImpact(TestCase):\n <function token>\n\n def test_peformance(self):\n \"\"\" Test impact calculation peformance\n and also return time of progress\n \"\"\"\n timedeltas = []\n for file in os.listdir(settings.ANALYSIS_REPORT_FOLDER):\n _file = open(os.path.join(settings.ANALYSIS_REPORT_FOLDER, file\n ), 'r')\n report = json.loads(_file.read())\n timedeltas.append(parse_datetime(report['finish']) -\n parse_datetime(report['start']))\n _file.close()\n print('NUMBER OF QUEUE = {}'.format(len(timedeltas)))\n average_timedelta = sum(timedeltas, datetime.timedelta(0)) / len(\n timedeltas)\n print('AVERAGE = {}'.format(average_timedelta))\n self.assertTrue(average_timedelta < datetime.timedelta(minutes=3))\n total = timedeltas[0]\n for delta in timedeltas[:1]:\n total += delta\n print('TOTAL = {}'.format(total))\n self.assertTrue(total < datetime.timedelta(minutes=3 * len(timedeltas))\n )\n\n def test_event_historical(self):\n \"\"\" Test for return list of historical event\n \"\"\"\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'rpc/flood_event_historical_forecast_list_f')\n events = requests.post(url, data={'forecast_date_range_start': self\n .forecast_date_range_start, 'forecast_date_range_end': self.\n forecast_date_range_end}).json()\n self.assertTrue(len(events) != 0)\n\n def test_hazard_event(self):\n \"\"\" Test of hazard event in date of tests\n \"\"\"\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'hazard_event?and=(forecast_date.gte.{},forecast_date.lt.{})&order=acquisition_date.desc'\n .format(self.forecast_date_range_start, self.\n forecast_date_range_end))\n events = requests.get(url).json()\n district_summary_fixture = os.path.join(settings.FIXTURES, 'test',\n 'district_summary.json')\n with open(district_summary_fixture, 'r') as _file:\n district_summary_fixture = json.load(_file)\n road_district_summary_fixture = os.path.join(settings.FIXTURES,\n 'test', 'road_district_summary.json')\n with open(road_district_summary_fixture, 'r') as _file:\n road_district_summary_fixture = json.load(_file)\n world_pop_district_summary = os.path.join(settings.FIXTURES, 'test',\n 'world_pop_district_summary.json')\n with open(world_pop_district_summary, 'r') as _file:\n world_pop_district_summary = json.load(_file)\n for event in events:\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_district_summary?flood_event_id=eq.{}'.\n format(event['id']))\n for summary in requests.get(url).json():\n fixture = district_summary_fixture[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_road_district_summary?flood_event_id=eq.{}'\n .format(event['id']))\n for summary in requests.get(url).json():\n fixture = road_district_summary_fixture[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_world_pop_district_summary?flood_event_id=eq.{}'\n .format(event['id']))\n for summary in requests.get(url).json():\n fixture = world_pop_district_summary[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n",
"<assignment token>\n<import token>\n\n\nclass TestAnalyzeImpact(TestCase):\n <function token>\n <function token>\n\n def test_event_historical(self):\n \"\"\" Test for return list of historical event\n \"\"\"\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'rpc/flood_event_historical_forecast_list_f')\n events = requests.post(url, data={'forecast_date_range_start': self\n .forecast_date_range_start, 'forecast_date_range_end': self.\n forecast_date_range_end}).json()\n self.assertTrue(len(events) != 0)\n\n def test_hazard_event(self):\n \"\"\" Test of hazard event in date of tests\n \"\"\"\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'hazard_event?and=(forecast_date.gte.{},forecast_date.lt.{})&order=acquisition_date.desc'\n .format(self.forecast_date_range_start, self.\n forecast_date_range_end))\n events = requests.get(url).json()\n district_summary_fixture = os.path.join(settings.FIXTURES, 'test',\n 'district_summary.json')\n with open(district_summary_fixture, 'r') as _file:\n district_summary_fixture = json.load(_file)\n road_district_summary_fixture = os.path.join(settings.FIXTURES,\n 'test', 'road_district_summary.json')\n with open(road_district_summary_fixture, 'r') as _file:\n road_district_summary_fixture = json.load(_file)\n world_pop_district_summary = os.path.join(settings.FIXTURES, 'test',\n 'world_pop_district_summary.json')\n with open(world_pop_district_summary, 'r') as _file:\n world_pop_district_summary = json.load(_file)\n for event in events:\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_district_summary?flood_event_id=eq.{}'.\n format(event['id']))\n for summary in requests.get(url).json():\n fixture = district_summary_fixture[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_road_district_summary?flood_event_id=eq.{}'\n .format(event['id']))\n for summary in requests.get(url).json():\n fixture = road_district_summary_fixture[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'mv_flood_event_world_pop_district_summary?flood_event_id=eq.{}'\n .format(event['id']))\n for summary in requests.get(url).json():\n fixture = world_pop_district_summary[summary['name']]\n for key, value in summary.items():\n if key == 'flood_event_id':\n return\n self.assertEqual(value, fixture[key])\n",
"<assignment token>\n<import token>\n\n\nclass TestAnalyzeImpact(TestCase):\n <function token>\n <function token>\n\n def test_event_historical(self):\n \"\"\" Test for return list of historical event\n \"\"\"\n url = os.path.join(settings.POSTGREST_BASE_URL,\n 'rpc/flood_event_historical_forecast_list_f')\n events = requests.post(url, data={'forecast_date_range_start': self\n .forecast_date_range_start, 'forecast_date_range_end': self.\n forecast_date_range_end}).json()\n self.assertTrue(len(events) != 0)\n <function token>\n",
"<assignment token>\n<import token>\n\n\nclass TestAnalyzeImpact(TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<assignment token>\n<import token>\n<class token>\n"
] | false |
99,520 |
41d2e2cb966e864a2c4c9eabb86eb22702842c68
|
from django.contrib import admin
# Register your models here.
from .models import Payee, Customer, CustomerAccount, FundTransfer
admin.site.register(Payee)
admin.site.register(Customer)
admin.site.register(CustomerAccount)
admin.site.register(FundTransfer)
|
[
"from django.contrib import admin\n\n# Register your models here.\n\n\nfrom .models import Payee, Customer, CustomerAccount, FundTransfer\n\nadmin.site.register(Payee)\nadmin.site.register(Customer)\nadmin.site.register(CustomerAccount)\nadmin.site.register(FundTransfer)\n",
"from django.contrib import admin\nfrom .models import Payee, Customer, CustomerAccount, FundTransfer\nadmin.site.register(Payee)\nadmin.site.register(Customer)\nadmin.site.register(CustomerAccount)\nadmin.site.register(FundTransfer)\n",
"<import token>\nadmin.site.register(Payee)\nadmin.site.register(Customer)\nadmin.site.register(CustomerAccount)\nadmin.site.register(FundTransfer)\n",
"<import token>\n<code token>\n"
] | false |
99,521 |
93842ba636015563f2e89651c0aed2718d74310e
|
'''
Created on Sep 2, 2014
@author: Kevin
'''
# from Adam Barr's wonderful book "Find the Bug"
''' This function draws a card from a deck and puts it into a hand. It is
meant to be part of the game Go Fish, so if the resulting hand has all four
suits for a given card rank, those four cards are removed from the hand.
Cards are identified by their rank and suit: the rank is one of the elements
in the list ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"]
and the suit is one of the elements of the list ["spades", "hearts", "diamonds", "clubs"].
A deck is a list that initially contains 52 elements. Each element of the list
is a tuple with two elements: the rank and the suit. So a single entry
in the deck might be the tuple ("K", "spades"), which is the king of spades.
A hand is a dictionary. In each element of the dictionary, the key is
the rank and the value is a list that contains the names of the suits that the hand
holds for that rank. E.g., if a hand has the 3 of spades and the 3 of hearts, and
no other 3s, then the key "3" has the value ["spades", "hearts"]. A key should not
have an empty list associated with it - if no cards of a given rank are held,
no value exists for that key.'''
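# A quick illustration of the structures described above (hypothetical values,
# not produced by this module):
#     example_card = ("K", "spades")              # one deck entry
#     example_hand = {"3": ["spades", "hearts"]}  # the 3 of spades and 3 of hearts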
import random
import sys
rankList = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"]
suitList = ["spades", "hearts", "diamonds", "clubs"]
GOFISH = "Go Fish"
def log_stdout(msg):
    '''Print msg to the screen.'''
    print(msg)

def makeDeck():
    ''' Creates a deck.
    A deck is a list that initially contains 52 elements. Each element of the list
    is a tuple with two elements: the rank and the suit. So a single entry
    in the deck might be the tuple ("K", "spades"), which is the king of spades.
    '''
    deck = []
    for r in rankList:
        for s in suitList:
            deck.append([r, s])
    return deck

def getCard(deck):
    ''' Randomly remove a single card from the deck and return it. Assumes that the deck
    is not empty.

    deck: A deck as described above

    Returns: a single card, which is a tuple with two elements, the rank and the suit
    '''
    index = random.randint(0, len(deck) - 1)
    newCard = deck[index]
    del deck[index]
    return newCard

def askForCard(requestor, requestorHand, giver, giverHand):
    '''Asks other player for a needed rank '''
    if len(requestorHand) == 0:
        print("%s has no cards. %s" % (requestor, GOFISH))
        return

    # find the rank with the maximum count
    maxKey = GOFISH
    maxCount = 0
    for key in requestorHand:
        count = len(requestorHand.get(key))
        if count > maxCount:
            maxKey = key
            maxCount = count

    if len(giverHand) == 0:
        print("%s has requested %s but %s has no cards. %s" % (requestor, maxKey, giver, GOFISH))
        return GOFISH

    received = giverHand.get(maxKey, GOFISH)
    print("%s asked %s for %s and the answer was %s" % (requestor, giver, maxKey, received))

    if received == GOFISH:
        return GOFISH
    for value in received:
        requestorHand[maxKey].append(value)
    del giverHand[maxKey]
    return received
def drawCard(name, deck, hand):
    ''' Draw a new card from the deck and add it to the hand. (If the hand then
    holds a rank in all four suits, laying those four cards down is handled
    separately by playHand.)

    name: A string with the name of the playerHand, used only for display purposes.
    deck: A deck as described above
    hand: A hand dictionary as described above

    Returns: None.
    '''
    if len(deck) > 0:  # protect against empty deck
        newCard = getCard(deck)
        cardRank = newCard[0]
        cardSuit = newCard[1]
    else:
        return

    if cardRank in hand:
        # append this suit to the result
        hand[cardRank].append(cardSuit)
    else:
        # first of this suit, create a list with one element
        hand[cardRank] = [cardSuit]
def initHand(deck, hand, numberOfCards):
    # Deal numberOfCards cards from the deck into the hand.
    for i in range(numberOfCards):
        newCard = getCard(deck)
        cardRank = newCard[0]
        cardSuit = newCard[1]
        testList = hand.get(cardRank, "notAsuitSuit")
        if testList == "notAsuitSuit":
            hand[cardRank] = [cardSuit]
        else:
            hand[cardRank].append(cardSuit)

def playHand(name, hand):
    # Lay down every rank for which all four suits are held.
    played = False
    for r in rankList:
        cardSuits = hand.get(r, "notAsuitSuit")
        if len(cardSuits) == 4:
            print('%s %s %s' % (name, "lay down", r + "s"))
            del hand[r]
            played = True
    if not played:
        print("player %s has nothing to play" % (name))
class GoFish:
    ''' Play a game of Go Fish!
    '''
    def __init__(self, playerList=["DefaultPlayer1", "DefaultPlayer2"]):
        if len(playerList) > 0:
            tempPlayers = playerList
        else:
            tempPlayers = ["DefaultPlayer1", "DefaultPlayer2"]
        self.deck = makeDeck()
        self.players = {}
        initCardCount = 7
        if len(tempPlayers) > 4:
            initCardCount = 5
        for name in tempPlayers:
            self.players[name] = {}
            initHand(self.deck, self.players[name], initCardCount)

    def autoPlay(self):
        '''Plays a game of GoFish'''
        notDone = True
        roundNumber = 0
        whoToAsk = {}
        while notDone:
            roundNumber += 1
            print('Round %i !' % (roundNumber))
            for player in self.players:
                playersHand = self.players.get(player)
                print('player %s is now playing and has %i ranks' % (player, len(playersHand)))

                temp = whoToAsk.get(player, GOFISH)
                if (temp == GOFISH) or (len(temp) == 0):
                    whoToAsk[player] = []
                    for temp in self.players:
                        if temp != player:
                            whoToAsk[player].append(temp)

                giver = whoToAsk[player].pop(0)
                giverHand = self.players.get(giver)
                received = askForCard(player, playersHand, giver, giverHand)
                if received == GOFISH:
                    if len(self.deck) == 0:
                        print("nothing to draw. moving along. will ask another player next round")
                        # for debugPlayer in self.players:
                        #     print("player %s has the following cards %s" % (debugPlayer, self.players.get(debugPlayer)))
                        # notDone = False
                        # continue
                    else:
                        drawCard(player, self.deck, playersHand)
                playHand(player, playersHand)
                if len(playersHand) <= 0:
                    print('player %s has won!' % (player))
                    notDone = False
                    continue
        print("game over")
# Main
if __name__ == '__main__':
    '''Plays a Go Fish with the players passed as arguments'''
    #
    # ToDo - add interactive mode
    #
    players = []
    count = 0
    for p in sys.argv[1:]:
        players.append(p)
        count += 1

    game = GoFish(playerList=players)
    game.autoPlay()
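# Example invocation (script filename and player names are illustrative):
#   python gofish.py Alice Bob Carol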
|
[
"'''\r\nCreated on Sep 2, 2014\r\n\r\n@author: Kevin\r\n'''\r\nfrom test.pickletester import MyList\r\n\r\n# from Adam Barr's wonderful book \"Find the Bug\"\r\n\r\n''' This function draws a card from a deck and puts it into a hand. It is\r\nmeant to be part of the game Go Fish, so if the resulting hand has all four \r\nsuits for a given card rank, those four cards are removed from the hand. \r\n\r\nCards are identified by their rank and suit: the rank is one of the elements\r\nin the list [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"J\", \"Q\", \"K\", \"A\"]\r\nand the suit is on of the elements of the list [\"spades\", \"hearts\", \"diamonds\", \"clubs\"].\r\n\r\nA deck is a list that initially contains 52 elements. Each element of the list\r\nis a tuple with two elements: the rank and the suit. So a single entry\r\nin the deck might be the tuple (\"K\", \"spades\"), which is the king of spades.\r\n\r\nA hand is a dictionary. In each element of the dictionary, the key is\r\nthe rank and the value is a list that contains the names of the suits that the hand\r\nholds for that rank. E.g., if a hand as the 3 of spades and the 3 of hearts, and\r\nno other 3s, then the key \"3\" has the value [\"spades\", \"hearts\"]. A key should not\r\nhave an empty list associated with it - if no cards of a given rank are held,\r\nno value exists for that key.'''\r\n\r\nimport random\r\nimport sys\r\n\r\nrankList = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"J\", \"Q\", \"K\", \"A\"]\r\nsuitList = [\"spades\", \"hearts\", \"diamonds\", \"clubs\"]\r\nGOFISH = \"Go Fish\"\r\n \r\ndef log_stdout(msg):\r\n '''Print msg to the screen.'''\r\n print(msg)\r\n\r\ndef makeDeck():\r\n ''' Creates a deck.\r\n A deck is a list that initially contains 52 elements. Each element of the list\r\n is a tuple with two elements: the rank and the suit. So a single entry\r\n in the deck might be the tuple (\"K\", \"spades\"), which is the king of spades. \r\n '''\r\n \r\n deck = []\r\n for r in rankList:\r\n for s in suitList:\r\n deck.append([r, s])\r\n return deck\r\n\r\ndef getCard(deck):\r\n\r\n ''' Randomly remove a single card from the deck and return it. Assumes that the deck \r\n is not empty.\r\n\r\n deck: A deck as described above\r\n\r\n Returns: a single card, which is a tuple with two elements, the rank and the suit\r\n\r\n '''\r\n index = random.randint(0,len(deck)-1)\r\n newCard = deck[index]\r\n del deck[index]\r\n return newCard\r\n\r\ndef askForCard(requestor, requestorHand, giver, giverHand ):\r\n '''Asks other player for a needed rank '''\r\n if len(requestorHand) == 0:\r\n print(\"%s has no cards. %s\" %(requestor,GOFISH))\r\n return\r\n\r\n #find the rank with maximum count\r\n maxKey = GOFISH\r\n maxCount = 0\r\n for key in requestorHand:\r\n count=len(requestorHand.get(key))\r\n if count > maxCount:\r\n maxKey = key\r\n maxCount = count\r\n \r\n if len(giverHand) == 0:\r\n print(\"%s has requested %s but %s has no cards. %s\" %(requestor,maxKey,giver,GOFISH))\r\n return GOFISH\r\n \r\n received = giverHand.get(maxKey,GOFISH)\r\n print(\"%s asked %s for %s and the answer was %s\" %(requestor,giver, maxKey,received))\r\n \r\n if received == GOFISH:\r\n return GOFISH\r\n for value in received:\r\n requestorHand[maxKey].append(value)\r\n del giverHand[maxKey]\r\n return received\r\n \r\n\r\ndef drawCard(name, deck, hand):\r\n\r\n ''' Draw a new card from the deck and add it to the hand. 
If the hand now holds the rank\r\n in all four suits, then remove them from the hand.\r\n\r\n name: A string with the name of the playerHand, used only for display purposes.\r\n deck: A deck as described above\r\n hand: A hand dictionary as described above\r\n\r\n Returns: None.\r\n '''\r\n\r\n if len(deck) > 0: # protect against empty deck\r\n newCard = getCard(deck)\r\n cardRank = newCard[0]\r\n cardSuit = newCard[1]\r\n else:\r\n return\r\n\r\n if cardRank in hand:\r\n # append this suit to the result\r\n hand[cardRank].append(cardSuit)\r\n else:\r\n # first of this suit, create a list with one element\r\n hand[cardRank] = [ cardSuit ]\r\n\r\ndef initHand(deck,hand,numberOfCards):\r\n for i in range(numberOfCards):\r\n newCard=getCard(deck)\r\n cardRank = newCard[0]\r\n cardSuit = newCard[1]\r\n testList = hand.get(cardRank,\"notAsuitSuit\")\r\n if testList == \"notAsuitSuit\":\r\n hand[cardRank]=[cardSuit]\r\n else:\r\n hand[cardRank].append(cardSuit)\r\n\r\ndef playHand(name,hand):\r\n played=False\r\n for r in rankList:\r\n cardSuits=hand.get(r,\"notAsuitSuit\")\r\n if len(cardSuits) == 4:\r\n print('%s %s %s' % (name, \"lay down\", r + \"s\"))\r\n del hand[r]\r\n played=True\r\n if not played:\r\n print(\"player %s has nothing to play\" %(name)) \r\n \r\nclass GoFish:\r\n ''' Play a game of Go Fish!\r\n '''\r\n def __init__(self, playerList=[\"DefaultPlayer1\", \"DefaultPlayer2\"]):\r\n if (len(playerList)>0):\r\n tempPlayers = playerList\r\n else:\r\n tempPlayers = [\"DefaultPlayer1\", \"DefaultPlayer2\"]\r\n self.deck=makeDeck()\r\n self.players={}\r\n initCardCount=7 \r\n if (len(tempPlayers)>4):\r\n initCardCount=5\r\n for name in tempPlayers:\r\n self.players[name]={}\r\n initHand(self.deck, self.players[name], initCardCount)\r\n\r\n def autoPlay(self):\r\n '''Plays a game of GoFish'''\r\n notDone = True\r\n roundNumber = 0\r\n whoToAsk={}\r\n while notDone:\r\n roundNumber+=1\r\n print('Round %i !' % (roundNumber))\r\n for player in self.players:\r\n playersHand = self.players.get(player)\r\n print('player %s is now playing and has %i ranks' % (player,len(playersHand))) \r\n \r\n temp = whoToAsk.get(player,GOFISH)\r\n if (temp == GOFISH) or (len(temp)==0):\r\n whoToAsk[player]=[]\r\n for temp in self.players:\r\n if temp != player:\r\n whoToAsk[player].append(temp)\r\n \r\n giver = whoToAsk[player].pop(0)\r\n giverHand = self.players.get(giver)\r\n received = askForCard(player, playersHand, giver, giverHand)\r\n if received == GOFISH:\r\n if len(self.deck) == 0:\r\n print(\"nothing to draw. moving along. will ask another player next round\")\r\n #for debugPlayer in self.players:\r\n # print(\"player %s has the following cards %s\" %(debugPlayer,self.players.get(debugPlayer)))\r\n #notDone=False\r\n #continue\r\n else:\r\n drawCard(player, self.deck, playersHand)\r\n playHand(player, playersHand)\r\n if len(playersHand) <= 0:\r\n print('player %s has won!' %(player))\r\n notDone=False\r\n continue\r\n print(\"game over\")\r\n \r\n# Main\r\nif __name__ == '__main__':\r\n '''Plays a Go Fish with the players passed as arguments'''\r\n #\r\n #ToDo - add interactive mode\r\n # \r\n players = []\r\n count = 0\r\n for p in sys.argv[1:]:\r\n players.append(p)\r\n count += 1\r\n\r\n game = GoFish(playerList=players)\r\n game.autoPlay()\r\n \r\n",
"<docstring token>\nfrom test.pickletester import MyList\n<docstring token>\nimport random\nimport sys\nrankList = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']\nsuitList = ['spades', 'hearts', 'diamonds', 'clubs']\nGOFISH = 'Go Fish'\n\n\ndef log_stdout(msg):\n \"\"\"Print msg to the screen.\"\"\"\n print(msg)\n\n\ndef makeDeck():\n \"\"\" Creates a deck.\n A deck is a list that initially contains 52 elements. Each element of the list\n is a tuple with two elements: the rank and the suit. So a single entry\n in the deck might be the tuple (\"K\", \"spades\"), which is the king of spades. \n \"\"\"\n deck = []\n for r in rankList:\n for s in suitList:\n deck.append([r, s])\n return deck\n\n\ndef getCard(deck):\n \"\"\" Randomly remove a single card from the deck and return it. Assumes that the deck \n is not empty.\n\n deck: A deck as described above\n\n Returns: a single card, which is a tuple with two elements, the rank and the suit\n\n \"\"\"\n index = random.randint(0, len(deck) - 1)\n newCard = deck[index]\n del deck[index]\n return newCard\n\n\ndef askForCard(requestor, requestorHand, giver, giverHand):\n \"\"\"Asks other player for a needed rank \"\"\"\n if len(requestorHand) == 0:\n print('%s has no cards. %s' % (requestor, GOFISH))\n return\n maxKey = GOFISH\n maxCount = 0\n for key in requestorHand:\n count = len(requestorHand.get(key))\n if count > maxCount:\n maxKey = key\n maxCount = count\n if len(giverHand) == 0:\n print('%s has requested %s but %s has no cards. %s' % (requestor,\n maxKey, giver, GOFISH))\n return GOFISH\n received = giverHand.get(maxKey, GOFISH)\n print('%s asked %s for %s and the answer was %s' % (requestor, giver,\n maxKey, received))\n if received == GOFISH:\n return GOFISH\n for value in received:\n requestorHand[maxKey].append(value)\n del giverHand[maxKey]\n return received\n\n\ndef drawCard(name, deck, hand):\n \"\"\" Draw a new card from the deck and add it to the hand. 
If the hand now holds the rank\n in all four suits, then remove them from the hand.\n\n name: A string with the name of the playerHand, used only for display purposes.\n deck: A deck as described above\n hand: A hand dictionary as described above\n\n Returns: None.\n \"\"\"\n if len(deck) > 0:\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n else:\n return\n if cardRank in hand:\n hand[cardRank].append(cardSuit)\n else:\n hand[cardRank] = [cardSuit]\n\n\ndef initHand(deck, hand, numberOfCards):\n for i in range(numberOfCards):\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n testList = hand.get(cardRank, 'notAsuitSuit')\n if testList == 'notAsuitSuit':\n hand[cardRank] = [cardSuit]\n else:\n hand[cardRank].append(cardSuit)\n\n\ndef playHand(name, hand):\n played = False\n for r in rankList:\n cardSuits = hand.get(r, 'notAsuitSuit')\n if len(cardSuits) == 4:\n print('%s %s %s' % (name, 'lay down', r + 's'))\n del hand[r]\n played = True\n if not played:\n print('player %s has nothing to play' % name)\n\n\nclass GoFish:\n \"\"\" Play a game of Go Fish!\n \"\"\"\n\n def __init__(self, playerList=['DefaultPlayer1', 'DefaultPlayer2']):\n if len(playerList) > 0:\n tempPlayers = playerList\n else:\n tempPlayers = ['DefaultPlayer1', 'DefaultPlayer2']\n self.deck = makeDeck()\n self.players = {}\n initCardCount = 7\n if len(tempPlayers) > 4:\n initCardCount = 5\n for name in tempPlayers:\n self.players[name] = {}\n initHand(self.deck, self.players[name], initCardCount)\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' % roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\nif __name__ == '__main__':\n \"\"\"Plays a Go Fish with the players passed as arguments\"\"\"\n players = []\n count = 0\n for p in sys.argv[1:]:\n players.append(p)\n count += 1\n game = GoFish(playerList=players)\n game.autoPlay()\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\nrankList = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']\nsuitList = ['spades', 'hearts', 'diamonds', 'clubs']\nGOFISH = 'Go Fish'\n\n\ndef log_stdout(msg):\n \"\"\"Print msg to the screen.\"\"\"\n print(msg)\n\n\ndef makeDeck():\n \"\"\" Creates a deck.\n A deck is a list that initially contains 52 elements. Each element of the list\n is a tuple with two elements: the rank and the suit. So a single entry\n in the deck might be the tuple (\"K\", \"spades\"), which is the king of spades. \n \"\"\"\n deck = []\n for r in rankList:\n for s in suitList:\n deck.append([r, s])\n return deck\n\n\ndef getCard(deck):\n \"\"\" Randomly remove a single card from the deck and return it. Assumes that the deck \n is not empty.\n\n deck: A deck as described above\n\n Returns: a single card, which is a tuple with two elements, the rank and the suit\n\n \"\"\"\n index = random.randint(0, len(deck) - 1)\n newCard = deck[index]\n del deck[index]\n return newCard\n\n\ndef askForCard(requestor, requestorHand, giver, giverHand):\n \"\"\"Asks other player for a needed rank \"\"\"\n if len(requestorHand) == 0:\n print('%s has no cards. %s' % (requestor, GOFISH))\n return\n maxKey = GOFISH\n maxCount = 0\n for key in requestorHand:\n count = len(requestorHand.get(key))\n if count > maxCount:\n maxKey = key\n maxCount = count\n if len(giverHand) == 0:\n print('%s has requested %s but %s has no cards. %s' % (requestor,\n maxKey, giver, GOFISH))\n return GOFISH\n received = giverHand.get(maxKey, GOFISH)\n print('%s asked %s for %s and the answer was %s' % (requestor, giver,\n maxKey, received))\n if received == GOFISH:\n return GOFISH\n for value in received:\n requestorHand[maxKey].append(value)\n del giverHand[maxKey]\n return received\n\n\ndef drawCard(name, deck, hand):\n \"\"\" Draw a new card from the deck and add it to the hand. 
If the hand now holds the rank\n in all four suits, then remove them from the hand.\n\n name: A string with the name of the playerHand, used only for display purposes.\n deck: A deck as described above\n hand: A hand dictionary as described above\n\n Returns: None.\n \"\"\"\n if len(deck) > 0:\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n else:\n return\n if cardRank in hand:\n hand[cardRank].append(cardSuit)\n else:\n hand[cardRank] = [cardSuit]\n\n\ndef initHand(deck, hand, numberOfCards):\n for i in range(numberOfCards):\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n testList = hand.get(cardRank, 'notAsuitSuit')\n if testList == 'notAsuitSuit':\n hand[cardRank] = [cardSuit]\n else:\n hand[cardRank].append(cardSuit)\n\n\ndef playHand(name, hand):\n played = False\n for r in rankList:\n cardSuits = hand.get(r, 'notAsuitSuit')\n if len(cardSuits) == 4:\n print('%s %s %s' % (name, 'lay down', r + 's'))\n del hand[r]\n played = True\n if not played:\n print('player %s has nothing to play' % name)\n\n\nclass GoFish:\n \"\"\" Play a game of Go Fish!\n \"\"\"\n\n def __init__(self, playerList=['DefaultPlayer1', 'DefaultPlayer2']):\n if len(playerList) > 0:\n tempPlayers = playerList\n else:\n tempPlayers = ['DefaultPlayer1', 'DefaultPlayer2']\n self.deck = makeDeck()\n self.players = {}\n initCardCount = 7\n if len(tempPlayers) > 4:\n initCardCount = 5\n for name in tempPlayers:\n self.players[name] = {}\n initHand(self.deck, self.players[name], initCardCount)\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' % roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\nif __name__ == '__main__':\n \"\"\"Plays a Go Fish with the players passed as arguments\"\"\"\n players = []\n count = 0\n for p in sys.argv[1:]:\n players.append(p)\n count += 1\n game = GoFish(playerList=players)\n game.autoPlay()\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n\n\ndef log_stdout(msg):\n \"\"\"Print msg to the screen.\"\"\"\n print(msg)\n\n\ndef makeDeck():\n \"\"\" Creates a deck.\n A deck is a list that initially contains 52 elements. Each element of the list\n is a tuple with two elements: the rank and the suit. So a single entry\n in the deck might be the tuple (\"K\", \"spades\"), which is the king of spades. \n \"\"\"\n deck = []\n for r in rankList:\n for s in suitList:\n deck.append([r, s])\n return deck\n\n\ndef getCard(deck):\n \"\"\" Randomly remove a single card from the deck and return it. Assumes that the deck \n is not empty.\n\n deck: A deck as described above\n\n Returns: a single card, which is a tuple with two elements, the rank and the suit\n\n \"\"\"\n index = random.randint(0, len(deck) - 1)\n newCard = deck[index]\n del deck[index]\n return newCard\n\n\ndef askForCard(requestor, requestorHand, giver, giverHand):\n \"\"\"Asks other player for a needed rank \"\"\"\n if len(requestorHand) == 0:\n print('%s has no cards. %s' % (requestor, GOFISH))\n return\n maxKey = GOFISH\n maxCount = 0\n for key in requestorHand:\n count = len(requestorHand.get(key))\n if count > maxCount:\n maxKey = key\n maxCount = count\n if len(giverHand) == 0:\n print('%s has requested %s but %s has no cards. %s' % (requestor,\n maxKey, giver, GOFISH))\n return GOFISH\n received = giverHand.get(maxKey, GOFISH)\n print('%s asked %s for %s and the answer was %s' % (requestor, giver,\n maxKey, received))\n if received == GOFISH:\n return GOFISH\n for value in received:\n requestorHand[maxKey].append(value)\n del giverHand[maxKey]\n return received\n\n\ndef drawCard(name, deck, hand):\n \"\"\" Draw a new card from the deck and add it to the hand. If the hand now holds the rank\n in all four suits, then remove them from the hand.\n\n name: A string with the name of the playerHand, used only for display purposes.\n deck: A deck as described above\n hand: A hand dictionary as described above\n\n Returns: None.\n \"\"\"\n if len(deck) > 0:\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n else:\n return\n if cardRank in hand:\n hand[cardRank].append(cardSuit)\n else:\n hand[cardRank] = [cardSuit]\n\n\ndef initHand(deck, hand, numberOfCards):\n for i in range(numberOfCards):\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n testList = hand.get(cardRank, 'notAsuitSuit')\n if testList == 'notAsuitSuit':\n hand[cardRank] = [cardSuit]\n else:\n hand[cardRank].append(cardSuit)\n\n\ndef playHand(name, hand):\n played = False\n for r in rankList:\n cardSuits = hand.get(r, 'notAsuitSuit')\n if len(cardSuits) == 4:\n print('%s %s %s' % (name, 'lay down', r + 's'))\n del hand[r]\n played = True\n if not played:\n print('player %s has nothing to play' % name)\n\n\nclass GoFish:\n \"\"\" Play a game of Go Fish!\n \"\"\"\n\n def __init__(self, playerList=['DefaultPlayer1', 'DefaultPlayer2']):\n if len(playerList) > 0:\n tempPlayers = playerList\n else:\n tempPlayers = ['DefaultPlayer1', 'DefaultPlayer2']\n self.deck = makeDeck()\n self.players = {}\n initCardCount = 7\n if len(tempPlayers) > 4:\n initCardCount = 5\n for name in tempPlayers:\n self.players[name] = {}\n initHand(self.deck, self.players[name], initCardCount)\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' 
% roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\nif __name__ == '__main__':\n \"\"\"Plays a Go Fish with the players passed as arguments\"\"\"\n players = []\n count = 0\n for p in sys.argv[1:]:\n players.append(p)\n count += 1\n game = GoFish(playerList=players)\n game.autoPlay()\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n\n\ndef log_stdout(msg):\n \"\"\"Print msg to the screen.\"\"\"\n print(msg)\n\n\ndef makeDeck():\n \"\"\" Creates a deck.\n A deck is a list that initially contains 52 elements. Each element of the list\n is a tuple with two elements: the rank and the suit. So a single entry\n in the deck might be the tuple (\"K\", \"spades\"), which is the king of spades. \n \"\"\"\n deck = []\n for r in rankList:\n for s in suitList:\n deck.append([r, s])\n return deck\n\n\ndef getCard(deck):\n \"\"\" Randomly remove a single card from the deck and return it. Assumes that the deck \n is not empty.\n\n deck: A deck as described above\n\n Returns: a single card, which is a tuple with two elements, the rank and the suit\n\n \"\"\"\n index = random.randint(0, len(deck) - 1)\n newCard = deck[index]\n del deck[index]\n return newCard\n\n\ndef askForCard(requestor, requestorHand, giver, giverHand):\n \"\"\"Asks other player for a needed rank \"\"\"\n if len(requestorHand) == 0:\n print('%s has no cards. %s' % (requestor, GOFISH))\n return\n maxKey = GOFISH\n maxCount = 0\n for key in requestorHand:\n count = len(requestorHand.get(key))\n if count > maxCount:\n maxKey = key\n maxCount = count\n if len(giverHand) == 0:\n print('%s has requested %s but %s has no cards. %s' % (requestor,\n maxKey, giver, GOFISH))\n return GOFISH\n received = giverHand.get(maxKey, GOFISH)\n print('%s asked %s for %s and the answer was %s' % (requestor, giver,\n maxKey, received))\n if received == GOFISH:\n return GOFISH\n for value in received:\n requestorHand[maxKey].append(value)\n del giverHand[maxKey]\n return received\n\n\ndef drawCard(name, deck, hand):\n \"\"\" Draw a new card from the deck and add it to the hand. If the hand now holds the rank\n in all four suits, then remove them from the hand.\n\n name: A string with the name of the playerHand, used only for display purposes.\n deck: A deck as described above\n hand: A hand dictionary as described above\n\n Returns: None.\n \"\"\"\n if len(deck) > 0:\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n else:\n return\n if cardRank in hand:\n hand[cardRank].append(cardSuit)\n else:\n hand[cardRank] = [cardSuit]\n\n\ndef initHand(deck, hand, numberOfCards):\n for i in range(numberOfCards):\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n testList = hand.get(cardRank, 'notAsuitSuit')\n if testList == 'notAsuitSuit':\n hand[cardRank] = [cardSuit]\n else:\n hand[cardRank].append(cardSuit)\n\n\ndef playHand(name, hand):\n played = False\n for r in rankList:\n cardSuits = hand.get(r, 'notAsuitSuit')\n if len(cardSuits) == 4:\n print('%s %s %s' % (name, 'lay down', r + 's'))\n del hand[r]\n played = True\n if not played:\n print('player %s has nothing to play' % name)\n\n\nclass GoFish:\n \"\"\" Play a game of Go Fish!\n \"\"\"\n\n def __init__(self, playerList=['DefaultPlayer1', 'DefaultPlayer2']):\n if len(playerList) > 0:\n tempPlayers = playerList\n else:\n tempPlayers = ['DefaultPlayer1', 'DefaultPlayer2']\n self.deck = makeDeck()\n self.players = {}\n initCardCount = 7\n if len(tempPlayers) > 4:\n initCardCount = 5\n for name in tempPlayers:\n self.players[name] = {}\n initHand(self.deck, self.players[name], initCardCount)\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' 
% roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n\n\ndef log_stdout(msg):\n \"\"\"Print msg to the screen.\"\"\"\n print(msg)\n\n\ndef makeDeck():\n \"\"\" Creates a deck.\n A deck is a list that initially contains 52 elements. Each element of the list\n is a tuple with two elements: the rank and the suit. So a single entry\n in the deck might be the tuple (\"K\", \"spades\"), which is the king of spades. \n \"\"\"\n deck = []\n for r in rankList:\n for s in suitList:\n deck.append([r, s])\n return deck\n\n\ndef getCard(deck):\n \"\"\" Randomly remove a single card from the deck and return it. Assumes that the deck \n is not empty.\n\n deck: A deck as described above\n\n Returns: a single card, which is a tuple with two elements, the rank and the suit\n\n \"\"\"\n index = random.randint(0, len(deck) - 1)\n newCard = deck[index]\n del deck[index]\n return newCard\n\n\ndef askForCard(requestor, requestorHand, giver, giverHand):\n \"\"\"Asks other player for a needed rank \"\"\"\n if len(requestorHand) == 0:\n print('%s has no cards. %s' % (requestor, GOFISH))\n return\n maxKey = GOFISH\n maxCount = 0\n for key in requestorHand:\n count = len(requestorHand.get(key))\n if count > maxCount:\n maxKey = key\n maxCount = count\n if len(giverHand) == 0:\n print('%s has requested %s but %s has no cards. %s' % (requestor,\n maxKey, giver, GOFISH))\n return GOFISH\n received = giverHand.get(maxKey, GOFISH)\n print('%s asked %s for %s and the answer was %s' % (requestor, giver,\n maxKey, received))\n if received == GOFISH:\n return GOFISH\n for value in received:\n requestorHand[maxKey].append(value)\n del giverHand[maxKey]\n return received\n\n\n<function token>\n\n\ndef initHand(deck, hand, numberOfCards):\n for i in range(numberOfCards):\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n testList = hand.get(cardRank, 'notAsuitSuit')\n if testList == 'notAsuitSuit':\n hand[cardRank] = [cardSuit]\n else:\n hand[cardRank].append(cardSuit)\n\n\ndef playHand(name, hand):\n played = False\n for r in rankList:\n cardSuits = hand.get(r, 'notAsuitSuit')\n if len(cardSuits) == 4:\n print('%s %s %s' % (name, 'lay down', r + 's'))\n del hand[r]\n played = True\n if not played:\n print('player %s has nothing to play' % name)\n\n\nclass GoFish:\n \"\"\" Play a game of Go Fish!\n \"\"\"\n\n def __init__(self, playerList=['DefaultPlayer1', 'DefaultPlayer2']):\n if len(playerList) > 0:\n tempPlayers = playerList\n else:\n tempPlayers = ['DefaultPlayer1', 'DefaultPlayer2']\n self.deck = makeDeck()\n self.players = {}\n initCardCount = 7\n if len(tempPlayers) > 4:\n initCardCount = 5\n for name in tempPlayers:\n self.players[name] = {}\n initHand(self.deck, self.players[name], initCardCount)\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' % roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. 
will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n\n\ndef log_stdout(msg):\n \"\"\"Print msg to the screen.\"\"\"\n print(msg)\n\n\ndef makeDeck():\n \"\"\" Creates a deck.\n A deck is a list that initially contains 52 elements. Each element of the list\n is a tuple with two elements: the rank and the suit. So a single entry\n in the deck might be the tuple (\"K\", \"spades\"), which is the king of spades. \n \"\"\"\n deck = []\n for r in rankList:\n for s in suitList:\n deck.append([r, s])\n return deck\n\n\n<function token>\n\n\ndef askForCard(requestor, requestorHand, giver, giverHand):\n \"\"\"Asks other player for a needed rank \"\"\"\n if len(requestorHand) == 0:\n print('%s has no cards. %s' % (requestor, GOFISH))\n return\n maxKey = GOFISH\n maxCount = 0\n for key in requestorHand:\n count = len(requestorHand.get(key))\n if count > maxCount:\n maxKey = key\n maxCount = count\n if len(giverHand) == 0:\n print('%s has requested %s but %s has no cards. %s' % (requestor,\n maxKey, giver, GOFISH))\n return GOFISH\n received = giverHand.get(maxKey, GOFISH)\n print('%s asked %s for %s and the answer was %s' % (requestor, giver,\n maxKey, received))\n if received == GOFISH:\n return GOFISH\n for value in received:\n requestorHand[maxKey].append(value)\n del giverHand[maxKey]\n return received\n\n\n<function token>\n\n\ndef initHand(deck, hand, numberOfCards):\n for i in range(numberOfCards):\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n testList = hand.get(cardRank, 'notAsuitSuit')\n if testList == 'notAsuitSuit':\n hand[cardRank] = [cardSuit]\n else:\n hand[cardRank].append(cardSuit)\n\n\ndef playHand(name, hand):\n played = False\n for r in rankList:\n cardSuits = hand.get(r, 'notAsuitSuit')\n if len(cardSuits) == 4:\n print('%s %s %s' % (name, 'lay down', r + 's'))\n del hand[r]\n played = True\n if not played:\n print('player %s has nothing to play' % name)\n\n\nclass GoFish:\n \"\"\" Play a game of Go Fish!\n \"\"\"\n\n def __init__(self, playerList=['DefaultPlayer1', 'DefaultPlayer2']):\n if len(playerList) > 0:\n tempPlayers = playerList\n else:\n tempPlayers = ['DefaultPlayer1', 'DefaultPlayer2']\n self.deck = makeDeck()\n self.players = {}\n initCardCount = 7\n if len(tempPlayers) > 4:\n initCardCount = 5\n for name in tempPlayers:\n self.players[name] = {}\n initHand(self.deck, self.players[name], initCardCount)\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' % roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n\n\ndef log_stdout(msg):\n \"\"\"Print msg to the screen.\"\"\"\n print(msg)\n\n\ndef makeDeck():\n \"\"\" Creates a deck.\n A deck is a list that initially contains 52 elements. Each element of the list\n is a tuple with two elements: the rank and the suit. So a single entry\n in the deck might be the tuple (\"K\", \"spades\"), which is the king of spades. \n \"\"\"\n deck = []\n for r in rankList:\n for s in suitList:\n deck.append([r, s])\n return deck\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef initHand(deck, hand, numberOfCards):\n for i in range(numberOfCards):\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n testList = hand.get(cardRank, 'notAsuitSuit')\n if testList == 'notAsuitSuit':\n hand[cardRank] = [cardSuit]\n else:\n hand[cardRank].append(cardSuit)\n\n\ndef playHand(name, hand):\n played = False\n for r in rankList:\n cardSuits = hand.get(r, 'notAsuitSuit')\n if len(cardSuits) == 4:\n print('%s %s %s' % (name, 'lay down', r + 's'))\n del hand[r]\n played = True\n if not played:\n print('player %s has nothing to play' % name)\n\n\nclass GoFish:\n \"\"\" Play a game of Go Fish!\n \"\"\"\n\n def __init__(self, playerList=['DefaultPlayer1', 'DefaultPlayer2']):\n if len(playerList) > 0:\n tempPlayers = playerList\n else:\n tempPlayers = ['DefaultPlayer1', 'DefaultPlayer2']\n self.deck = makeDeck()\n self.players = {}\n initCardCount = 7\n if len(tempPlayers) > 4:\n initCardCount = 5\n for name in tempPlayers:\n self.players[name] = {}\n initHand(self.deck, self.players[name], initCardCount)\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' % roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n\n\ndef log_stdout(msg):\n \"\"\"Print msg to the screen.\"\"\"\n print(msg)\n\n\ndef makeDeck():\n \"\"\" Creates a deck.\n A deck is a list that initially contains 52 elements. Each element of the list\n is a tuple with two elements: the rank and the suit. So a single entry\n in the deck might be the tuple (\"K\", \"spades\"), which is the king of spades. \n \"\"\"\n deck = []\n for r in rankList:\n for s in suitList:\n deck.append([r, s])\n return deck\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef initHand(deck, hand, numberOfCards):\n for i in range(numberOfCards):\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n testList = hand.get(cardRank, 'notAsuitSuit')\n if testList == 'notAsuitSuit':\n hand[cardRank] = [cardSuit]\n else:\n hand[cardRank].append(cardSuit)\n\n\n<function token>\n\n\nclass GoFish:\n \"\"\" Play a game of Go Fish!\n \"\"\"\n\n def __init__(self, playerList=['DefaultPlayer1', 'DefaultPlayer2']):\n if len(playerList) > 0:\n tempPlayers = playerList\n else:\n tempPlayers = ['DefaultPlayer1', 'DefaultPlayer2']\n self.deck = makeDeck()\n self.players = {}\n initCardCount = 7\n if len(tempPlayers) > 4:\n initCardCount = 5\n for name in tempPlayers:\n self.players[name] = {}\n initHand(self.deck, self.players[name], initCardCount)\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' % roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef makeDeck():\n \"\"\" Creates a deck.\n A deck is a list that initially contains 52 elements. Each element of the list\n is a tuple with two elements: the rank and the suit. So a single entry\n in the deck might be the tuple (\"K\", \"spades\"), which is the king of spades. \n \"\"\"\n deck = []\n for r in rankList:\n for s in suitList:\n deck.append([r, s])\n return deck\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef initHand(deck, hand, numberOfCards):\n for i in range(numberOfCards):\n newCard = getCard(deck)\n cardRank = newCard[0]\n cardSuit = newCard[1]\n testList = hand.get(cardRank, 'notAsuitSuit')\n if testList == 'notAsuitSuit':\n hand[cardRank] = [cardSuit]\n else:\n hand[cardRank].append(cardSuit)\n\n\n<function token>\n\n\nclass GoFish:\n \"\"\" Play a game of Go Fish!\n \"\"\"\n\n def __init__(self, playerList=['DefaultPlayer1', 'DefaultPlayer2']):\n if len(playerList) > 0:\n tempPlayers = playerList\n else:\n tempPlayers = ['DefaultPlayer1', 'DefaultPlayer2']\n self.deck = makeDeck()\n self.players = {}\n initCardCount = 7\n if len(tempPlayers) > 4:\n initCardCount = 5\n for name in tempPlayers:\n self.players[name] = {}\n initHand(self.deck, self.players[name], initCardCount)\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' % roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef makeDeck():\n \"\"\" Creates a deck.\n A deck is a list that initially contains 52 elements. Each element of the list\n is a tuple with two elements: the rank and the suit. So a single entry\n in the deck might be the tuple (\"K\", \"spades\"), which is the king of spades. \n \"\"\"\n deck = []\n for r in rankList:\n for s in suitList:\n deck.append([r, s])\n return deck\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass GoFish:\n \"\"\" Play a game of Go Fish!\n \"\"\"\n\n def __init__(self, playerList=['DefaultPlayer1', 'DefaultPlayer2']):\n if len(playerList) > 0:\n tempPlayers = playerList\n else:\n tempPlayers = ['DefaultPlayer1', 'DefaultPlayer2']\n self.deck = makeDeck()\n self.players = {}\n initCardCount = 7\n if len(tempPlayers) > 4:\n initCardCount = 5\n for name in tempPlayers:\n self.players[name] = {}\n initHand(self.deck, self.players[name], initCardCount)\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' % roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass GoFish:\n \"\"\" Play a game of Go Fish!\n \"\"\"\n\n def __init__(self, playerList=['DefaultPlayer1', 'DefaultPlayer2']):\n if len(playerList) > 0:\n tempPlayers = playerList\n else:\n tempPlayers = ['DefaultPlayer1', 'DefaultPlayer2']\n self.deck = makeDeck()\n self.players = {}\n initCardCount = 7\n if len(tempPlayers) > 4:\n initCardCount = 5\n for name in tempPlayers:\n self.players[name] = {}\n initHand(self.deck, self.players[name], initCardCount)\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' % roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass GoFish:\n <docstring token>\n\n def __init__(self, playerList=['DefaultPlayer1', 'DefaultPlayer2']):\n if len(playerList) > 0:\n tempPlayers = playerList\n else:\n tempPlayers = ['DefaultPlayer1', 'DefaultPlayer2']\n self.deck = makeDeck()\n self.players = {}\n initCardCount = 7\n if len(tempPlayers) > 4:\n initCardCount = 5\n for name in tempPlayers:\n self.players[name] = {}\n initHand(self.deck, self.players[name], initCardCount)\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' % roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass GoFish:\n <docstring token>\n <function token>\n\n def autoPlay(self):\n \"\"\"Plays a game of GoFish\"\"\"\n notDone = True\n roundNumber = 0\n whoToAsk = {}\n while notDone:\n roundNumber += 1\n print('Round %i !' % roundNumber)\n for player in self.players:\n playersHand = self.players.get(player)\n print('player %s is now playing and has %i ranks' % (player,\n len(playersHand)))\n temp = whoToAsk.get(player, GOFISH)\n if temp == GOFISH or len(temp) == 0:\n whoToAsk[player] = []\n for temp in self.players:\n if temp != player:\n whoToAsk[player].append(temp)\n giver = whoToAsk[player].pop(0)\n giverHand = self.players.get(giver)\n received = askForCard(player, playersHand, giver, giverHand)\n if received == GOFISH:\n if len(self.deck) == 0:\n print(\n 'nothing to draw. moving along. will ask another player next round'\n )\n else:\n drawCard(player, self.deck, playersHand)\n playHand(player, playersHand)\n if len(playersHand) <= 0:\n print('player %s has won!' % player)\n notDone = False\n continue\n print('game over')\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass GoFish:\n <docstring token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<code token>\n"
] | false |
99,522 |
9a3ba83e989d9ee2905aa7b4df65f3a5badf62ea
|
def isNum(n):
    # True if the single character n is a decimal digit.
    return n in "0123456789"

# Read a number and its base, e.g. "1A 16", then print its decimal value.
inputNum, dec = input().split(" ")
dec = int(dec)
result = 0
for i in range(len(inputNum)):
    if isNum(inputNum[i]):
        # Digit characters contribute their face value.
        result += int(inputNum[i]) * dec**(len(inputNum)-1-i)
    else:
        # Uppercase letters map to 10, 11, ... since ord('A') - 55 == 10.
        result += (ord(inputNum[i]) - 55) * dec**(len(inputNum)-1-i)
print(result)
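# Worked example (assuming uppercase letter digits, as the ord()-55 mapping
# requires): "1A" in base 16 is 1*16**1 + 10*16**0 = 26, so "1A 16" prints 26.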
|
[
"def isNum(n):\n if(n == \"0\" or n == \"1\" or n == \"2\" or n == \"3\" or n == \"4\" or n == \"5\" or n == \"6\" or n == \"7\" or n == \"8\" or n == \"9\"):\n return True\n else:\n return False\n\n\ninputNum, dec = input().split(\" \")\ndec = int(dec)\nresult = 0\nfor i in range(len(inputNum)):\n if isNum(inputNum[i]):\n # print(\"type1\", int(inputNum[i]), int(inputNum[i])*dec**(len(inputNum)-1-i))\n result += (int(inputNum[i])*dec**(len(inputNum)-1-i))\n else:\n # print(\"type2\", (ord(inputNum[i])-55), (ord(inputNum[i])-55)*dec**(len(inputNum)-1-i))\n result += ((ord(inputNum[i])-55)*dec**(len(inputNum)-1-i))\nprint(result)\n",
"def isNum(n):\n if (n == '0' or n == '1' or n == '2' or n == '3' or n == '4' or n ==\n '5' or n == '6' or n == '7' or n == '8' or n == '9'):\n return True\n else:\n return False\n\n\ninputNum, dec = input().split(' ')\ndec = int(dec)\nresult = 0\nfor i in range(len(inputNum)):\n if isNum(inputNum[i]):\n result += int(inputNum[i]) * dec ** (len(inputNum) - 1 - i)\n else:\n result += (ord(inputNum[i]) - 55) * dec ** (len(inputNum) - 1 - i)\nprint(result)\n",
"def isNum(n):\n if (n == '0' or n == '1' or n == '2' or n == '3' or n == '4' or n ==\n '5' or n == '6' or n == '7' or n == '8' or n == '9'):\n return True\n else:\n return False\n\n\n<assignment token>\nfor i in range(len(inputNum)):\n if isNum(inputNum[i]):\n result += int(inputNum[i]) * dec ** (len(inputNum) - 1 - i)\n else:\n result += (ord(inputNum[i]) - 55) * dec ** (len(inputNum) - 1 - i)\nprint(result)\n",
"def isNum(n):\n if (n == '0' or n == '1' or n == '2' or n == '3' or n == '4' or n ==\n '5' or n == '6' or n == '7' or n == '8' or n == '9'):\n return True\n else:\n return False\n\n\n<assignment token>\n<code token>\n",
"<function token>\n<assignment token>\n<code token>\n"
] | false |
99,523 |
16204468e556558ea63a1082dbe8777a79d2a658
|
from builders import Literal
from internal.writer import Writer
class Script(object):
def __init__(self):
self._children = []
self._writer = Writer()
def add(self, child):
# We special-case strings so that people don't need to write "Literal" everywhere.
if isinstance(child, str):
child = Literal(child)
self._children.append(child)
return self
# Converts everything in the script to a string
# and returns it.
    def serialize(self):
        # Guard the empty case: self._children[-1] below would raise an
        # IndexError on a script with no children.
        if not self._children:
            return self._writer.done()
        for child in self._children[:-1]:
            child.build().accept(self._writer)
            self._writer.newline(1)
        # We don't want a newline after the last line, so we add it separately.
        self._children[-1].build().accept(self._writer)
        return self._writer.done()
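# Minimal usage sketch (hypothetical commands; assumes Literal builds a node
# whose accept() writes its text into the Writer, as the comments above imply):
#   script = Script()
#   script.add("echo hello").add("echo world")   # add() returns self, so calls chain
#   print(script.serialize())                    # lines joined by newlines, no trailing newline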
|
[
"from builders import Literal\nfrom internal.writer import Writer\n\nclass Script(object):\n def __init__(self):\n self._children = []\n self._writer = Writer()\n\n def add(self, child):\n # We special-case strings so that people don't need to write \"Literal\" everywhere.\n if isinstance(child, str):\n child = Literal(child)\n self._children.append(child)\n return self\n\n # Converts everything in the script to a string\n # and returns it.\n def serialize(self):\n for child in self._children[:-1]:\n child.build().accept(self._writer)\n self._writer.newline(1)\n # We don't want a newline after the last line, so we add it separately.\n self._children[-1].build().accept(self._writer)\n return self._writer.done()",
"from builders import Literal\nfrom internal.writer import Writer\n\n\nclass Script(object):\n\n def __init__(self):\n self._children = []\n self._writer = Writer()\n\n def add(self, child):\n if isinstance(child, str):\n child = Literal(child)\n self._children.append(child)\n return self\n\n def serialize(self):\n for child in self._children[:-1]:\n child.build().accept(self._writer)\n self._writer.newline(1)\n self._children[-1].build().accept(self._writer)\n return self._writer.done()\n",
"<import token>\n\n\nclass Script(object):\n\n def __init__(self):\n self._children = []\n self._writer = Writer()\n\n def add(self, child):\n if isinstance(child, str):\n child = Literal(child)\n self._children.append(child)\n return self\n\n def serialize(self):\n for child in self._children[:-1]:\n child.build().accept(self._writer)\n self._writer.newline(1)\n self._children[-1].build().accept(self._writer)\n return self._writer.done()\n",
"<import token>\n\n\nclass Script(object):\n\n def __init__(self):\n self._children = []\n self._writer = Writer()\n\n def add(self, child):\n if isinstance(child, str):\n child = Literal(child)\n self._children.append(child)\n return self\n <function token>\n",
"<import token>\n\n\nclass Script(object):\n\n def __init__(self):\n self._children = []\n self._writer = Writer()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass Script(object):\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,524 |
cf9d1ed0768c28745719e1949a88c7de972313ed
|
# _*_ coding: utf-8 _*_
from flask import Flask
from config import Config
def create_app():
app = Flask(__name__)
app.config.from_object(Config)
from .views import main_bp
app.register_blueprint(main_bp)
return app
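# For context, a minimal views module this factory could register (hypothetical
# sketch; the real .views module is not shown in this file):
#   from flask import Blueprint
#   main_bp = Blueprint('main', __name__)
#
#   @main_bp.route('/')
#   def index():
#       return 'Hello, world!'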
|
[
"# _*_ coding: utf-8 _*_\nfrom flask import Flask\nfrom config import Config\n\n\ndef create_app():\n app = Flask(__name__)\n app.config.from_object(Config)\n\n from .views import main_bp\n\n app.register_blueprint(main_bp)\n\n return app\n",
"from flask import Flask\nfrom config import Config\n\n\ndef create_app():\n app = Flask(__name__)\n app.config.from_object(Config)\n from .views import main_bp\n app.register_blueprint(main_bp)\n return app\n",
"<import token>\n\n\ndef create_app():\n app = Flask(__name__)\n app.config.from_object(Config)\n from .views import main_bp\n app.register_blueprint(main_bp)\n return app\n",
"<import token>\n<function token>\n"
] | false |
99,525 |
40ab6ef57075a5eaef6a423c040bd453dce791f7
|
import setuptools
setuptools.setup(
entry_points={
"console_scripts": [
"run_etl = sliide_etl.main:main"
]
},
name="sliide_etl",
package_dir={
"sliide_etl": ""
},
packages=setuptools.find_packages(),
install_requires=[
"pandas"
],
setup_requires=[
"pytest-runner"
],
tests_require=[
"pytest-runner",
"pytest"
],
package_data={
"": ["*"]
},
version='0.1.0',
)
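# Usage note (not part of the original file): after `pip install .`, the
# console_scripts entry point exposes the ETL as the `run_etl` command, and
# `python setup.py pytest` runs the test suite via pytest-runner.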
|
[
"import setuptools\n\nsetuptools.setup(\n entry_points={\n \"console_scripts\": [\n \"run_etl = sliide_etl.main:main\"\n ]\n },\n name=\"sliide_etl\",\n package_dir={\n \"sliide_etl\": \"\"\n },\n packages=setuptools.find_packages(),\n install_requires=[\n \"pandas\"\n ],\n setup_requires=[\n \"pytest-runner\"\n ],\n tests_require=[\n \"pytest-runner\",\n \"pytest\"\n ],\n package_data={\n \"\": [\"*\"]\n },\n version='0.1.0',\n)\n",
"import setuptools\nsetuptools.setup(entry_points={'console_scripts': [\n 'run_etl = sliide_etl.main:main']}, name='sliide_etl', package_dir={\n 'sliide_etl': ''}, packages=setuptools.find_packages(),\n install_requires=['pandas'], setup_requires=['pytest-runner'],\n tests_require=['pytest-runner', 'pytest'], package_data={'': ['*']},\n version='0.1.0')\n",
"<import token>\nsetuptools.setup(entry_points={'console_scripts': [\n 'run_etl = sliide_etl.main:main']}, name='sliide_etl', package_dir={\n 'sliide_etl': ''}, packages=setuptools.find_packages(),\n install_requires=['pandas'], setup_requires=['pytest-runner'],\n tests_require=['pytest-runner', 'pytest'], package_data={'': ['*']},\n version='0.1.0')\n",
"<import token>\n<code token>\n"
] | false |
99,526 |
5ee7989bc9e1fc0595b7aa43636a041864304bd0
|
# coding: utf-8
"""
Cloudbreak API
    Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing you to spin up Hadoop clusters of arbitrary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class InstanceGroupAdjustment(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'instance_group': 'str',
'scaling_adjustment': 'int'
}
attribute_map = {
'instance_group': 'instanceGroup',
'scaling_adjustment': 'scalingAdjustment'
}
def __init__(self, instance_group=None, scaling_adjustment=None):
"""
InstanceGroupAdjustment - a model defined in Swagger
"""
self._instance_group = None
self._scaling_adjustment = None
self.instance_group = instance_group
self.scaling_adjustment = scaling_adjustment
@property
def instance_group(self):
"""
Gets the instance_group of this InstanceGroupAdjustment.
name of the instance group
:return: The instance_group of this InstanceGroupAdjustment.
:rtype: str
"""
return self._instance_group
@instance_group.setter
def instance_group(self, instance_group):
"""
Sets the instance_group of this InstanceGroupAdjustment.
name of the instance group
:param instance_group: The instance_group of this InstanceGroupAdjustment.
:type: str
"""
if instance_group is None:
raise ValueError("Invalid value for `instance_group`, must not be `None`")
self._instance_group = instance_group
@property
def scaling_adjustment(self):
"""
Gets the scaling_adjustment of this InstanceGroupAdjustment.
scaling adjustment of the instance groups
:return: The scaling_adjustment of this InstanceGroupAdjustment.
:rtype: int
"""
return self._scaling_adjustment
@scaling_adjustment.setter
def scaling_adjustment(self, scaling_adjustment):
"""
Sets the scaling_adjustment of this InstanceGroupAdjustment.
scaling adjustment of the instance groups
:param scaling_adjustment: The scaling_adjustment of this InstanceGroupAdjustment.
:type: int
"""
if scaling_adjustment is None:
raise ValueError("Invalid value for `scaling_adjustment`, must not be `None`")
self._scaling_adjustment = scaling_adjustment
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, InstanceGroupAdjustment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
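# Illustrative round trip (comment only, so the generated module is unchanged):
#   adj = InstanceGroupAdjustment(instance_group='worker', scaling_adjustment=2)
#   adj.to_dict()  # -> {'instance_group': 'worker', 'scaling_adjustment': 2}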
|
[
"# coding: utf-8\n\n\"\"\"\n Cloudbreak API\n\n Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\\\"http://hortonworks.com/apache/cloudbreak/\\\">http://hortonworks.com/apache/cloudbreak/</a>\n\n OpenAPI spec version: 2.9.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass InstanceGroupAdjustment(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'instance_group': 'str',\n 'scaling_adjustment': 'int'\n }\n\n attribute_map = {\n 'instance_group': 'instanceGroup',\n 'scaling_adjustment': 'scalingAdjustment'\n }\n\n def __init__(self, instance_group=None, scaling_adjustment=None):\n \"\"\"\n InstanceGroupAdjustment - a model defined in Swagger\n \"\"\"\n\n self._instance_group = None\n self._scaling_adjustment = None\n\n self.instance_group = instance_group\n self.scaling_adjustment = scaling_adjustment\n\n @property\n def instance_group(self):\n \"\"\"\n Gets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :return: The instance_group of this InstanceGroupAdjustment.\n :rtype: str\n \"\"\"\n return self._instance_group\n\n @instance_group.setter\n def instance_group(self, instance_group):\n \"\"\"\n Sets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :param instance_group: The instance_group of this InstanceGroupAdjustment.\n :type: str\n \"\"\"\n if instance_group is None:\n raise ValueError(\"Invalid value for `instance_group`, must not be `None`\")\n\n self._instance_group = instance_group\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n\n @scaling_adjustment.setter\n def scaling_adjustment(self, scaling_adjustment):\n \"\"\"\n Sets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :param scaling_adjustment: The scaling_adjustment of this InstanceGroupAdjustment.\n :type: int\n \"\"\"\n if scaling_adjustment is None:\n raise ValueError(\"Invalid value for `scaling_adjustment`, must not be `None`\")\n\n self._scaling_adjustment = scaling_adjustment\n\n def to_dict(self):\n 
\"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, InstanceGroupAdjustment):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n",
"<docstring token>\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass InstanceGroupAdjustment(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {'instance_group': 'str', 'scaling_adjustment': 'int'}\n attribute_map = {'instance_group': 'instanceGroup',\n 'scaling_adjustment': 'scalingAdjustment'}\n\n def __init__(self, instance_group=None, scaling_adjustment=None):\n \"\"\"\n InstanceGroupAdjustment - a model defined in Swagger\n \"\"\"\n self._instance_group = None\n self._scaling_adjustment = None\n self.instance_group = instance_group\n self.scaling_adjustment = scaling_adjustment\n\n @property\n def instance_group(self):\n \"\"\"\n Gets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :return: The instance_group of this InstanceGroupAdjustment.\n :rtype: str\n \"\"\"\n return self._instance_group\n\n @instance_group.setter\n def instance_group(self, instance_group):\n \"\"\"\n Sets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :param instance_group: The instance_group of this InstanceGroupAdjustment.\n :type: str\n \"\"\"\n if instance_group is None:\n raise ValueError(\n 'Invalid value for `instance_group`, must not be `None`')\n self._instance_group = instance_group\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n\n @scaling_adjustment.setter\n def scaling_adjustment(self, scaling_adjustment):\n \"\"\"\n Sets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :param scaling_adjustment: The scaling_adjustment of this InstanceGroupAdjustment.\n :type: int\n \"\"\"\n if scaling_adjustment is None:\n raise ValueError(\n 'Invalid value for `scaling_adjustment`, must not be `None`')\n self._scaling_adjustment = scaling_adjustment\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, InstanceGroupAdjustment):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {'instance_group': 'str', 'scaling_adjustment': 'int'}\n attribute_map = {'instance_group': 'instanceGroup',\n 'scaling_adjustment': 'scalingAdjustment'}\n\n def __init__(self, instance_group=None, scaling_adjustment=None):\n \"\"\"\n InstanceGroupAdjustment - a model defined in Swagger\n \"\"\"\n self._instance_group = None\n self._scaling_adjustment = None\n self.instance_group = instance_group\n self.scaling_adjustment = scaling_adjustment\n\n @property\n def instance_group(self):\n \"\"\"\n Gets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :return: The instance_group of this InstanceGroupAdjustment.\n :rtype: str\n \"\"\"\n return self._instance_group\n\n @instance_group.setter\n def instance_group(self, instance_group):\n \"\"\"\n Sets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :param instance_group: The instance_group of this InstanceGroupAdjustment.\n :type: str\n \"\"\"\n if instance_group is None:\n raise ValueError(\n 'Invalid value for `instance_group`, must not be `None`')\n self._instance_group = instance_group\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n\n @scaling_adjustment.setter\n def scaling_adjustment(self, scaling_adjustment):\n \"\"\"\n Sets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :param scaling_adjustment: The scaling_adjustment of this InstanceGroupAdjustment.\n :type: int\n \"\"\"\n if scaling_adjustment is None:\n raise ValueError(\n 'Invalid value for `scaling_adjustment`, must not be `None`')\n self._scaling_adjustment = scaling_adjustment\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, InstanceGroupAdjustment):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n <docstring token>\n <docstring token>\n swagger_types = {'instance_group': 'str', 'scaling_adjustment': 'int'}\n attribute_map = {'instance_group': 'instanceGroup',\n 'scaling_adjustment': 'scalingAdjustment'}\n\n def __init__(self, instance_group=None, scaling_adjustment=None):\n \"\"\"\n InstanceGroupAdjustment - a model defined in Swagger\n \"\"\"\n self._instance_group = None\n self._scaling_adjustment = None\n self.instance_group = instance_group\n self.scaling_adjustment = scaling_adjustment\n\n @property\n def instance_group(self):\n \"\"\"\n Gets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :return: The instance_group of this InstanceGroupAdjustment.\n :rtype: str\n \"\"\"\n return self._instance_group\n\n @instance_group.setter\n def instance_group(self, instance_group):\n \"\"\"\n Sets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :param instance_group: The instance_group of this InstanceGroupAdjustment.\n :type: str\n \"\"\"\n if instance_group is None:\n raise ValueError(\n 'Invalid value for `instance_group`, must not be `None`')\n self._instance_group = instance_group\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n\n @scaling_adjustment.setter\n def scaling_adjustment(self, scaling_adjustment):\n \"\"\"\n Sets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :param scaling_adjustment: The scaling_adjustment of this InstanceGroupAdjustment.\n :type: int\n \"\"\"\n if scaling_adjustment is None:\n raise ValueError(\n 'Invalid value for `scaling_adjustment`, must not be `None`')\n self._scaling_adjustment = scaling_adjustment\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, InstanceGroupAdjustment):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, instance_group=None, scaling_adjustment=None):\n \"\"\"\n InstanceGroupAdjustment - a model defined in Swagger\n \"\"\"\n self._instance_group = None\n self._scaling_adjustment = None\n self.instance_group = instance_group\n self.scaling_adjustment = scaling_adjustment\n\n @property\n def instance_group(self):\n \"\"\"\n Gets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :return: The instance_group of this InstanceGroupAdjustment.\n :rtype: str\n \"\"\"\n return self._instance_group\n\n @instance_group.setter\n def instance_group(self, instance_group):\n \"\"\"\n Sets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :param instance_group: The instance_group of this InstanceGroupAdjustment.\n :type: str\n \"\"\"\n if instance_group is None:\n raise ValueError(\n 'Invalid value for `instance_group`, must not be `None`')\n self._instance_group = instance_group\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n\n @scaling_adjustment.setter\n def scaling_adjustment(self, scaling_adjustment):\n \"\"\"\n Sets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :param scaling_adjustment: The scaling_adjustment of this InstanceGroupAdjustment.\n :type: int\n \"\"\"\n if scaling_adjustment is None:\n raise ValueError(\n 'Invalid value for `scaling_adjustment`, must not be `None`')\n self._scaling_adjustment = scaling_adjustment\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, InstanceGroupAdjustment):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, instance_group=None, scaling_adjustment=None):\n \"\"\"\n InstanceGroupAdjustment - a model defined in Swagger\n \"\"\"\n self._instance_group = None\n self._scaling_adjustment = None\n self.instance_group = instance_group\n self.scaling_adjustment = scaling_adjustment\n\n @property\n def instance_group(self):\n \"\"\"\n Gets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :return: The instance_group of this InstanceGroupAdjustment.\n :rtype: str\n \"\"\"\n return self._instance_group\n\n @instance_group.setter\n def instance_group(self, instance_group):\n \"\"\"\n Sets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :param instance_group: The instance_group of this InstanceGroupAdjustment.\n :type: str\n \"\"\"\n if instance_group is None:\n raise ValueError(\n 'Invalid value for `instance_group`, must not be `None`')\n self._instance_group = instance_group\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n\n @scaling_adjustment.setter\n def scaling_adjustment(self, scaling_adjustment):\n \"\"\"\n Sets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :param scaling_adjustment: The scaling_adjustment of this InstanceGroupAdjustment.\n :type: int\n \"\"\"\n if scaling_adjustment is None:\n raise ValueError(\n 'Invalid value for `scaling_adjustment`, must not be `None`')\n self._scaling_adjustment = scaling_adjustment\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n <function token>\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, InstanceGroupAdjustment):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n\n @property\n def instance_group(self):\n \"\"\"\n Gets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :return: The instance_group of this InstanceGroupAdjustment.\n :rtype: str\n \"\"\"\n return self._instance_group\n\n @instance_group.setter\n def instance_group(self, instance_group):\n \"\"\"\n Sets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :param instance_group: The instance_group of this InstanceGroupAdjustment.\n :type: str\n \"\"\"\n if instance_group is None:\n raise ValueError(\n 'Invalid value for `instance_group`, must not be `None`')\n self._instance_group = instance_group\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n\n @scaling_adjustment.setter\n def scaling_adjustment(self, scaling_adjustment):\n \"\"\"\n Sets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :param scaling_adjustment: The scaling_adjustment of this InstanceGroupAdjustment.\n :type: int\n \"\"\"\n if scaling_adjustment is None:\n raise ValueError(\n 'Invalid value for `scaling_adjustment`, must not be `None`')\n self._scaling_adjustment = scaling_adjustment\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n <function token>\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, InstanceGroupAdjustment):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n @instance_group.setter\n def instance_group(self, instance_group):\n \"\"\"\n Sets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :param instance_group: The instance_group of this InstanceGroupAdjustment.\n :type: str\n \"\"\"\n if instance_group is None:\n raise ValueError(\n 'Invalid value for `instance_group`, must not be `None`')\n self._instance_group = instance_group\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n\n @scaling_adjustment.setter\n def scaling_adjustment(self, scaling_adjustment):\n \"\"\"\n Sets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :param scaling_adjustment: The scaling_adjustment of this InstanceGroupAdjustment.\n :type: int\n \"\"\"\n if scaling_adjustment is None:\n raise ValueError(\n 'Invalid value for `scaling_adjustment`, must not be `None`')\n self._scaling_adjustment = scaling_adjustment\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n <function token>\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, InstanceGroupAdjustment):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n @instance_group.setter\n def instance_group(self, instance_group):\n \"\"\"\n Sets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :param instance_group: The instance_group of this InstanceGroupAdjustment.\n :type: str\n \"\"\"\n if instance_group is None:\n raise ValueError(\n 'Invalid value for `instance_group`, must not be `None`')\n self._instance_group = instance_group\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n\n @scaling_adjustment.setter\n def scaling_adjustment(self, scaling_adjustment):\n \"\"\"\n Sets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :param scaling_adjustment: The scaling_adjustment of this InstanceGroupAdjustment.\n :type: int\n \"\"\"\n if scaling_adjustment is None:\n raise ValueError(\n 'Invalid value for `scaling_adjustment`, must not be `None`')\n self._scaling_adjustment = scaling_adjustment\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n <function token>\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, InstanceGroupAdjustment):\n return False\n return self.__dict__ == other.__dict__\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n @instance_group.setter\n def instance_group(self, instance_group):\n \"\"\"\n Sets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :param instance_group: The instance_group of this InstanceGroupAdjustment.\n :type: str\n \"\"\"\n if instance_group is None:\n raise ValueError(\n 'Invalid value for `instance_group`, must not be `None`')\n self._instance_group = instance_group\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n <function token>\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n <function token>\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, InstanceGroupAdjustment):\n return False\n return self.__dict__ == other.__dict__\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n @instance_group.setter\n def instance_group(self, instance_group):\n \"\"\"\n Sets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :param instance_group: The instance_group of this InstanceGroupAdjustment.\n :type: str\n \"\"\"\n if instance_group is None:\n raise ValueError(\n 'Invalid value for `instance_group`, must not be `None`')\n self._instance_group = instance_group\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n <function token>\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n <function token>\n <function token>\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, InstanceGroupAdjustment):\n return False\n return self.__dict__ == other.__dict__\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n @instance_group.setter\n def instance_group(self, instance_group):\n \"\"\"\n Sets the instance_group of this InstanceGroupAdjustment.\n name of the instance group\n\n :param instance_group: The instance_group of this InstanceGroupAdjustment.\n :type: str\n \"\"\"\n if instance_group is None:\n raise ValueError(\n 'Invalid value for `instance_group`, must not be `None`')\n self._instance_group = instance_group\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n <function token>\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n <function token>\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @property\n def scaling_adjustment(self):\n \"\"\"\n Gets the scaling_adjustment of this InstanceGroupAdjustment.\n scaling adjustment of the instance groups\n\n :return: The scaling_adjustment of this InstanceGroupAdjustment.\n :rtype: int\n \"\"\"\n return self._scaling_adjustment\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass InstanceGroupAdjustment(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
99,527 |
e09421f3a93d1ef618be1cc8154f74f666b14354
|
from CyberSource import *
from pathlib import Path
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
# To delete None values in Input Request Json body
def del_none(d):
for key, value in list(d.items()):
if value is None:
del d[key]
elif isinstance(value, dict):
del_none(value)
return d
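# Note: del_none mutates its argument in place (and returns it for convenience);
# it recurses into nested dicts but not into lists of dicts, which is adequate
# for this flat request body.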
def create_search_request():
save = False
name = "MRN"
timezone = "America/Chicago"
query = "clientReferenceInformation.code:TC50171_3 AND submitTimeUtc:[NOW/DAY-7DAYS TO NOW/DAY+1DAY}"
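    # The query string appears to use Lucene-style range syntax, where '[' is an
    # inclusive bound and '}' an exclusive one: roughly "the last seven days, up
    # to but not including tomorrow" (an interpretation, not from this file).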
offset = 0
limit = 100
sort = "id:asc,submitTimeUtc:asc"
requestObj = CreateSearchRequest(
save = save,
name = name,
timezone = timezone,
query = query,
offset = offset,
limit = limit,
sort = sort
)
requestObj = del_none(requestObj.__dict__)
requestObj = json.dumps(requestObj)
try:
config_obj = configuration.Configuration()
client_config = config_obj.get_configuration()
api_instance = SearchTransactionsApi(client_config)
return_data, status, body = api_instance.create_search(requestObj)
print("\nAPI RESPONSE CODE : ", status)
print("\nAPI RESPONSE BODY : ", body)
write_log_audit(status)
return return_data
except Exception as e:
write_log_audit(e.status if hasattr(e, 'status') else 999)
print("\nException when calling SearchTransactionsApi->create_search: %s\n" % e)
def write_log_audit(status):
print(f"[Sample Code Testing] [{Path(__file__).stem}] {status}")
if __name__ == "__main__":
create_search_request()
|
[
"from CyberSource import *\nfrom pathlib import Path\nimport os\nimport json\nfrom importlib.machinery import SourceFileLoader\n\nconfig_file = os.path.join(os.getcwd(), \"data\", \"Configuration.py\")\nconfiguration = SourceFileLoader(\"module.name\", config_file).load_module()\n\n# To delete None values in Input Request Json body\ndef del_none(d):\n for key, value in list(d.items()):\n if value is None:\n del d[key]\n elif isinstance(value, dict):\n del_none(value)\n return d\n\ndef create_search_request():\n save = False\n name = \"MRN\"\n timezone = \"America/Chicago\"\n query = \"clientReferenceInformation.code:TC50171_3 AND submitTimeUtc:[NOW/DAY-7DAYS TO NOW/DAY+1DAY}\"\n offset = 0\n limit = 100\n sort = \"id:asc,submitTimeUtc:asc\"\n requestObj = CreateSearchRequest(\n save = save,\n name = name,\n timezone = timezone,\n query = query,\n offset = offset,\n limit = limit,\n sort = sort\n )\n\n\n requestObj = del_none(requestObj.__dict__)\n requestObj = json.dumps(requestObj)\n\n\n try:\n config_obj = configuration.Configuration()\n client_config = config_obj.get_configuration()\n api_instance = SearchTransactionsApi(client_config)\n return_data, status, body = api_instance.create_search(requestObj)\n\n print(\"\\nAPI RESPONSE CODE : \", status)\n print(\"\\nAPI RESPONSE BODY : \", body)\n\n write_log_audit(status)\n return return_data\n except Exception as e:\n write_log_audit(e.status if hasattr(e, 'status') else 999)\n print(\"\\nException when calling SearchTransactionsApi->create_search: %s\\n\" % e)\n\ndef write_log_audit(status):\n print(f\"[Sample Code Testing] [{Path(__file__).stem}] {status}\")\n\nif __name__ == \"__main__\":\n create_search_request()\n",
"from CyberSource import *\nfrom pathlib import Path\nimport os\nimport json\nfrom importlib.machinery import SourceFileLoader\nconfig_file = os.path.join(os.getcwd(), 'data', 'Configuration.py')\nconfiguration = SourceFileLoader('module.name', config_file).load_module()\n\n\ndef del_none(d):\n for key, value in list(d.items()):\n if value is None:\n del d[key]\n elif isinstance(value, dict):\n del_none(value)\n return d\n\n\ndef create_search_request():\n save = False\n name = 'MRN'\n timezone = 'America/Chicago'\n query = (\n 'clientReferenceInformation.code:TC50171_3 AND submitTimeUtc:[NOW/DAY-7DAYS TO NOW/DAY+1DAY}'\n )\n offset = 0\n limit = 100\n sort = 'id:asc,submitTimeUtc:asc'\n requestObj = CreateSearchRequest(save=save, name=name, timezone=\n timezone, query=query, offset=offset, limit=limit, sort=sort)\n requestObj = del_none(requestObj.__dict__)\n requestObj = json.dumps(requestObj)\n try:\n config_obj = configuration.Configuration()\n client_config = config_obj.get_configuration()\n api_instance = SearchTransactionsApi(client_config)\n return_data, status, body = api_instance.create_search(requestObj)\n print('\\nAPI RESPONSE CODE : ', status)\n print('\\nAPI RESPONSE BODY : ', body)\n write_log_audit(status)\n return return_data\n except Exception as e:\n write_log_audit(e.status if hasattr(e, 'status') else 999)\n print(\n '\\nException when calling SearchTransactionsApi->create_search: %s\\n'\n % e)\n\n\ndef write_log_audit(status):\n print(f'[Sample Code Testing] [{Path(__file__).stem}] {status}')\n\n\nif __name__ == '__main__':\n create_search_request()\n",
"<import token>\nconfig_file = os.path.join(os.getcwd(), 'data', 'Configuration.py')\nconfiguration = SourceFileLoader('module.name', config_file).load_module()\n\n\ndef del_none(d):\n for key, value in list(d.items()):\n if value is None:\n del d[key]\n elif isinstance(value, dict):\n del_none(value)\n return d\n\n\ndef create_search_request():\n save = False\n name = 'MRN'\n timezone = 'America/Chicago'\n query = (\n 'clientReferenceInformation.code:TC50171_3 AND submitTimeUtc:[NOW/DAY-7DAYS TO NOW/DAY+1DAY}'\n )\n offset = 0\n limit = 100\n sort = 'id:asc,submitTimeUtc:asc'\n requestObj = CreateSearchRequest(save=save, name=name, timezone=\n timezone, query=query, offset=offset, limit=limit, sort=sort)\n requestObj = del_none(requestObj.__dict__)\n requestObj = json.dumps(requestObj)\n try:\n config_obj = configuration.Configuration()\n client_config = config_obj.get_configuration()\n api_instance = SearchTransactionsApi(client_config)\n return_data, status, body = api_instance.create_search(requestObj)\n print('\\nAPI RESPONSE CODE : ', status)\n print('\\nAPI RESPONSE BODY : ', body)\n write_log_audit(status)\n return return_data\n except Exception as e:\n write_log_audit(e.status if hasattr(e, 'status') else 999)\n print(\n '\\nException when calling SearchTransactionsApi->create_search: %s\\n'\n % e)\n\n\ndef write_log_audit(status):\n print(f'[Sample Code Testing] [{Path(__file__).stem}] {status}')\n\n\nif __name__ == '__main__':\n create_search_request()\n",
"<import token>\n<assignment token>\n\n\ndef del_none(d):\n for key, value in list(d.items()):\n if value is None:\n del d[key]\n elif isinstance(value, dict):\n del_none(value)\n return d\n\n\ndef create_search_request():\n save = False\n name = 'MRN'\n timezone = 'America/Chicago'\n query = (\n 'clientReferenceInformation.code:TC50171_3 AND submitTimeUtc:[NOW/DAY-7DAYS TO NOW/DAY+1DAY}'\n )\n offset = 0\n limit = 100\n sort = 'id:asc,submitTimeUtc:asc'\n requestObj = CreateSearchRequest(save=save, name=name, timezone=\n timezone, query=query, offset=offset, limit=limit, sort=sort)\n requestObj = del_none(requestObj.__dict__)\n requestObj = json.dumps(requestObj)\n try:\n config_obj = configuration.Configuration()\n client_config = config_obj.get_configuration()\n api_instance = SearchTransactionsApi(client_config)\n return_data, status, body = api_instance.create_search(requestObj)\n print('\\nAPI RESPONSE CODE : ', status)\n print('\\nAPI RESPONSE BODY : ', body)\n write_log_audit(status)\n return return_data\n except Exception as e:\n write_log_audit(e.status if hasattr(e, 'status') else 999)\n print(\n '\\nException when calling SearchTransactionsApi->create_search: %s\\n'\n % e)\n\n\ndef write_log_audit(status):\n print(f'[Sample Code Testing] [{Path(__file__).stem}] {status}')\n\n\nif __name__ == '__main__':\n create_search_request()\n",
"<import token>\n<assignment token>\n\n\ndef del_none(d):\n for key, value in list(d.items()):\n if value is None:\n del d[key]\n elif isinstance(value, dict):\n del_none(value)\n return d\n\n\ndef create_search_request():\n save = False\n name = 'MRN'\n timezone = 'America/Chicago'\n query = (\n 'clientReferenceInformation.code:TC50171_3 AND submitTimeUtc:[NOW/DAY-7DAYS TO NOW/DAY+1DAY}'\n )\n offset = 0\n limit = 100\n sort = 'id:asc,submitTimeUtc:asc'\n requestObj = CreateSearchRequest(save=save, name=name, timezone=\n timezone, query=query, offset=offset, limit=limit, sort=sort)\n requestObj = del_none(requestObj.__dict__)\n requestObj = json.dumps(requestObj)\n try:\n config_obj = configuration.Configuration()\n client_config = config_obj.get_configuration()\n api_instance = SearchTransactionsApi(client_config)\n return_data, status, body = api_instance.create_search(requestObj)\n print('\\nAPI RESPONSE CODE : ', status)\n print('\\nAPI RESPONSE BODY : ', body)\n write_log_audit(status)\n return return_data\n except Exception as e:\n write_log_audit(e.status if hasattr(e, 'status') else 999)\n print(\n '\\nException when calling SearchTransactionsApi->create_search: %s\\n'\n % e)\n\n\ndef write_log_audit(status):\n print(f'[Sample Code Testing] [{Path(__file__).stem}] {status}')\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef create_search_request():\n save = False\n name = 'MRN'\n timezone = 'America/Chicago'\n query = (\n 'clientReferenceInformation.code:TC50171_3 AND submitTimeUtc:[NOW/DAY-7DAYS TO NOW/DAY+1DAY}'\n )\n offset = 0\n limit = 100\n sort = 'id:asc,submitTimeUtc:asc'\n requestObj = CreateSearchRequest(save=save, name=name, timezone=\n timezone, query=query, offset=offset, limit=limit, sort=sort)\n requestObj = del_none(requestObj.__dict__)\n requestObj = json.dumps(requestObj)\n try:\n config_obj = configuration.Configuration()\n client_config = config_obj.get_configuration()\n api_instance = SearchTransactionsApi(client_config)\n return_data, status, body = api_instance.create_search(requestObj)\n print('\\nAPI RESPONSE CODE : ', status)\n print('\\nAPI RESPONSE BODY : ', body)\n write_log_audit(status)\n return return_data\n except Exception as e:\n write_log_audit(e.status if hasattr(e, 'status') else 999)\n print(\n '\\nException when calling SearchTransactionsApi->create_search: %s\\n'\n % e)\n\n\ndef write_log_audit(status):\n print(f'[Sample Code Testing] [{Path(__file__).stem}] {status}')\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef create_search_request():\n save = False\n name = 'MRN'\n timezone = 'America/Chicago'\n query = (\n 'clientReferenceInformation.code:TC50171_3 AND submitTimeUtc:[NOW/DAY-7DAYS TO NOW/DAY+1DAY}'\n )\n offset = 0\n limit = 100\n sort = 'id:asc,submitTimeUtc:asc'\n requestObj = CreateSearchRequest(save=save, name=name, timezone=\n timezone, query=query, offset=offset, limit=limit, sort=sort)\n requestObj = del_none(requestObj.__dict__)\n requestObj = json.dumps(requestObj)\n try:\n config_obj = configuration.Configuration()\n client_config = config_obj.get_configuration()\n api_instance = SearchTransactionsApi(client_config)\n return_data, status, body = api_instance.create_search(requestObj)\n print('\\nAPI RESPONSE CODE : ', status)\n print('\\nAPI RESPONSE BODY : ', body)\n write_log_audit(status)\n return return_data\n except Exception as e:\n write_log_audit(e.status if hasattr(e, 'status') else 999)\n print(\n '\\nException when calling SearchTransactionsApi->create_search: %s\\n'\n % e)\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,528 |
fbdb648eec38897dd5eb898ac4ff48b921f3ed5b
|
import mutagen
import os
def extract(l):
""" Extract the first element of a list, raise an error if more than 1 elem """
if l is None: return None
if len(l) > 1:
raise ValueError('More than 1 Value')
try:
return l[0]
except IndexError:
return None
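# Normalize a mutagen tag lookup into a plain list (empty when the tag is absent).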
def get_tag(tags, name):
return list(tags.get(name, []))
def sanitize_genres(genres):
l = list()
for genre in genres:
for g in genre.split(','):
l.append(g.strip())
return l
def sanitize_year(year):
if year is None: return year
if isinstance(year, mutagen.id3.ID3TimeStamp):
return year.year
if len(year) == 4: return int(year)
return None
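# Track and disc numbers can arrive as 'n/total' strings (ID3) or (n, total) tuples (MP4);
# the two sanitizers below normalize both shapes to a plain int.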
def sanitize_track(track):
if track is None: return track
if isinstance(track, tuple):
return track[0]
if '/' in track:
return int(track.split('/')[0])
return int(track)
def sanitize_disk(disk):
if disk is None: return disk
if isinstance(disk, tuple):
return disk[0]
if '/' in disk:
return int(disk.split('/')[0])
return int(disk)
def get_track_info_mp4(filepath, tags, stream, cover=None):
""" Parses track information from mp4 file """
discogs = extract(tags.get('----:com.apple.iTunes:DISCOGS_RELEASE_ID'))
if not cover:
coverinfo = extract(tags.get('covr'))
if coverinfo:
if coverinfo.imageformat == mutagen.mp4.AtomDataType.JPEG:
cover = os.path.dirname(filepath) + '/cover.jpg'
elif coverinfo.imageformat == mutagen.mp4.AtomDataType.PNG:
cover = os.path.dirname(filepath) + '/cover.png'
if cover:
f = open(cover, 'wb+')
f.write(bytes(coverinfo))
f.close()
return {
"title": extract(tags.get('\xa9nam')),
"track": sanitize_track(extract(tags.get('trkn'))),
"artists": tags.get('\xa9ART'),
"albumartist": extract(tags.get('aART')) or extract(tags.get('\xa9ART')),
"album": extract(tags.get('\xa9alb')),
"discogs_id": bytes(discogs).decode('utf-8') if discogs else None,
"musicbrainz_id": "",
"disk": sanitize_disk(extract(tags.get('disk'))),
"year": sanitize_year(extract(tags.get('\xa9day'))),
"genres": sanitize_genres(tags.get('\xa9gen')),
"length": stream.length,
"bitrate": stream.bitrate,
"size": os.path.getsize(filepath),
"cover": cover,
"filepath": filepath,
}
def get_track_info_mp3(filepath, tags, stream, cover):
""" Parses track information from mp3 file """
tag = lambda t: get_tag(tags, t)
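    # Discogs/MusicBrainz ids are stored in user-defined TXXX frames, distinguished only by their desc field.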
discogs = extract(list(filter(lambda x: x.desc == 'DISCOGS_RELEASE_ID', tags.getall('TXXX'))))
musicbrainz = extract(list(filter(lambda x: x.desc == 'MusicBrainz Album Id', tags.getall('TXXX'))))
if musicbrainz: musicbrainz = extract(musicbrainz.text)
if not cover:
coverinfo = tags.get('APIC:')
if coverinfo:
if coverinfo.mime == 'image/jpeg':
cover = os.path.dirname(filepath) + '/cover.jpg'
else:
raise ValueError('Not supporting %s' % coverinfo.mime)
if cover:
f = open(cover, 'wb+')
f.write(coverinfo.data)
f.close()
track = sanitize_track(extract(tag('TRCK')))
date = tag('TDRC') or tag('TDAT') or tag('TYER')
return {
"title": extract(tag('TIT2')),
"track": track,
"artists": tag('TPE1'),
"albumartist": extract(tag('TPE2')) or extract(tags.get('TPE1')),
"album": extract(tag('TALB')),
"discogs_id": bytes(discogs).decode('utf-8') if discogs else None,
"musicbrainz_id": musicbrainz,
"disk": sanitize_disk(extract(tag('TPOS'))),
"year": sanitize_year(extract(date)),
"genres": sanitize_genres(tag('TCON')),
"length": stream.length,
"bitrate": stream.bitrate,
"size": os.path.getsize(filepath),
"cover": cover,
"filepath": filepath,
}
def get_track_info_opus(filepath, tags, stream, cover):
# for k, v in tags:
# print(k)
# print(v)
return {
"title": extract(tags.get("TITLE")),
"track": extract(tags.get("TRACK")),
"artists": tags.get('ARTIST'),
"albumartist": extract(tags.get("ALBUMARTIST")),
"album": extract(tags.get("ALBUM")),
# "discogs_id": bytes(discogs).decode('utf-8') if discogs else None,
"musicbrainz_song_id": extract(tags.get("MUSICBRAINZ_TRACKID")),
"musicbrainz_album_id":extract(tags.get("MUSICBRAINZ_ALBUMID")),
# "musicbrainz_artist_id":tags.get("MUSICBRAINZ_ARTISTID"),
"musicbrainz_albumartist_id":extract(tags.get("MUSICBRAINZ_ALBUMARTISTID")),
# "disk": sanitize_disk(extract(tag('TPOS'))),
"year": extract(tags.get("YEAR")),
"genres": tags.get("GENRE"),
"length": stream.length,
# "bitrate": stream.bitrate,
"size": os.path.getsize(filepath),
"cover": cover,
"filepath": filepath,
}
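# Module-level memo of folder -> cover path, so each album folder is only scanned once.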
COVERS = {}
def find_cover(folder):
""" Find the cover file base on a folder """
if COVERS.get(folder) is None:
for prefix in ['cover', 'Cover', 'Folder', 'folder']:
for suffix in ['.png', '.jpg', '.jpeg']:
f = os.path.join(folder, prefix + suffix)
if os.path.isfile(f):
COVERS[folder] = f
return f
return COVERS.get(folder)
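# Dispatch on mutagen's tag container type: ID3 -> mp3, MP4Tags -> m4a, OggOpus -> opus.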
def get_track_info(dirpath, f):
""" Parses track information from mutagen """
filepath = os.path.join(dirpath, f)
track = mutagen.File(filepath)
if not track:
if filepath.endswith('.mp3') or filepath.endswith('.m4a'):
raise ValueError('Skipped an mp3 or an m4a')
return None
cover = find_cover(dirpath)
if isinstance(track.tags, mutagen.id3.ID3):
return get_track_info_mp3(filepath, track.tags, track.info, cover)
if isinstance(track.tags, mutagen.mp4.MP4Tags):
return get_track_info_mp4(filepath, track.tags, track.info, cover)
if isinstance(track, mutagen.oggopus.OggOpus):
return get_track_info_opus(filepath, track.tags, track.info, cover)
raise ValueError("No parser for file format")
|
[
"\nimport mutagen\nimport os\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None: return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\ndef get_tag(tags, name):\n return list(tags.get(name, []))\n\n\ndef sanitize_genres(genres):\n l = list()\n for genre in genres:\n for g in genre.split(','):\n l.append(g.strip())\n return l\n\ndef sanitize_year(year):\n if year is None: return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4: return int(year)\n return None\n\ndef sanitize_track(track):\n if track is None: return track\n if isinstance(track, tuple):\n return track[0]\n if '/' in track:\n return int(track.split('/')[0])\n return int(track)\n\ndef sanitize_disk(disk):\n if disk is None: return disk\n if isinstance(disk, tuple):\n return disk[0]\n if '/' in disk:\n return int(disk.split('/')[0])\n return int(disk)\n\ndef get_track_info_mp4(filepath, tags, stream, cover=None):\n \"\"\" Parses track information from mp4 file \"\"\"\n discogs = extract(tags.get('----:com.apple.iTunes:DISCOGS_RELEASE_ID'))\n if not cover:\n coverinfo = extract(tags.get('covr'))\n if coverinfo:\n if coverinfo.imageformat == mutagen.mp4.AtomDataType.JPEG:\n cover = os.path.dirname(filepath) + '/cover.jpg'\n elif coverinfo.imageformat == mutagen.mp4.AtomDataType.PNG:\n cover = os.path.dirname(filepath) + '/cover.png'\n if cover:\n f = open(cover, 'wb+')\n f.write(bytes(coverinfo))\n f.close()\n\n return {\n \"title\": extract(tags.get('\\xa9nam')),\n \"track\": sanitize_track(extract(tags.get('trkn'))),\n \"artists\": tags.get('\\xa9ART'),\n \"albumartist\": extract(tags.get('aART')) or extract(tags.get('\\xa9ART')),\n \"album\": extract(tags.get('\\xa9alb')),\n \"discogs_id\": bytes(discogs).decode('utf-8') if discogs else None,\n \"musicbrainz_id\": \"\",\n \"disk\": sanitize_disk(extract(tags.get('disk'))),\n \"year\": sanitize_year(extract(tags.get('\\xa9day'))),\n \"genres\": sanitize_genres(tags.get('\\xa9gen')),\n \"length\": stream.length,\n \"bitrate\": stream.bitrate,\n \"size\": os.path.getsize(filepath),\n \"cover\": cover,\n \"filepath\": filepath,\n }\n\ndef get_track_info_mp3(filepath, tags, stream, cover):\n \"\"\" Parses track information from mp3 file \"\"\"\n tag = lambda t: get_tag(tags, t)\n discogs = extract(list(filter(lambda x: x.desc == 'DISCOGS_RELEASE_ID', tags.getall('TXXX'))))\n musicbrainz = extract(list(filter(lambda x: x.desc == 'MusicBrainz Album Id', tags.getall('TXXX'))))\n if musicbrainz: musicbrainz = extract(musicbrainz.text)\n if not cover:\n coverinfo = tags.get('APIC:')\n if coverinfo:\n if coverinfo.mime == 'image/jpeg':\n cover = os.path.dirname(filepath) + '/cover.jpg'\n else:\n raise ValueError('Not supporting %s' % coverinfo.mime)\n if cover:\n f = open(cover, 'wb+')\n f.write(coverinfo.data)\n f.close()\n\n track = sanitize_track(extract(tag('TRCK')))\n\n date = tag('TDRC') or tag('TDAT') or tag('TYER')\n return {\n \"title\": extract(tag('TIT2')),\n \"track\": track,\n \"artists\": tag('TPE1'),\n \"albumartist\": extract(tag('TPE2')) or extract(tags.get('TPE1')),\n \"album\": extract(tag('TALB')),\n \"discogs_id\": bytes(discogs).decode('utf-8') if discogs else None,\n \"musicbrainz_id\": musicbrainz,\n \"disk\": sanitize_disk(extract(tag('TPOS'))),\n \"year\": sanitize_year(extract(date)),\n \"genres\": sanitize_genres(tag('TCON')),\n \"length\": stream.length,\n \"bitrate\": 
stream.bitrate,\n \"size\": os.path.getsize(filepath),\n \"cover\": cover,\n \"filepath\": filepath,\n }\n\ndef get_track_info_opus(filepath, tags, stream, cover):\n\n # for k, v in tags:\n # print(k)\n # print(v)\n return {\n \"title\": extract(tags.get(\"TITLE\")),\n \"track\": extract(tags.get(\"TRACK\")),\n \"artists\": tags.get('ARTIST'),\n \"albumartist\": extract(tags.get(\"ALBUMARTIST\")),\n \"album\": extract(tags.get(\"ALBUM\")),\n # \"discogs_id\": bytes(discogs).decode('utf-8') if discogs else None,\n \"musicbrainz_song_id\": extract(tags.get(\"MUSICBRAINZ_TRACKID\")),\n \"musicbrainz_album_id\":extract(tags.get(\"MUSICBRAINZ_ALBUMID\")),\n # \"musicbrainz_artist_id\":tags.get(\"MUSICBRAINZ_ARTISTID\"),\n \"musicbrainz_albumartist_id\":extract(tags.get(\"MUSICBRAINZ_ALBUMARTISTID\")),\n # \"disk\": sanitize_disk(extract(tag('TPOS'))),\n \"year\": extract(tags.get(\"YEAR\")),\n \"genres\": tags.get(\"GENRE\"),\n \"length\": stream.length,\n # \"bitrate\": stream.bitrate,\n \"size\": os.path.getsize(filepath),\n \"cover\": cover,\n \"filepath\": filepath,\n }\n\nCOVERS = {}\ndef find_cover(folder):\n \"\"\" Find the cover file base on a folder \"\"\"\n if COVERS.get(folder) is None:\n for prefix in ['cover', 'Cover', 'Folder', 'folder']:\n for suffix in ['.png', '.jpg', '.jpeg']:\n f = os.path.join(folder, prefix + suffix)\n if os.path.isfile(f):\n COVERS[folder] = f\n return f\n return COVERS.get(folder)\n\ndef get_track_info(dirpath, f):\n \"\"\" Parses track information from mutagen \"\"\"\n filepath = os.path.join(dirpath, f)\n track = mutagen.File(filepath)\n if not track:\n if filepath.endswith('.mp3') or filepath.endswith('.m4a'):\n raise ValueError('Skipped an mp3 or an m4a')\n return None\n\n cover = find_cover(dirpath)\n if isinstance(track.tags, mutagen.id3.ID3):\n return get_track_info_mp3(filepath, track.tags, track.info, cover)\n if isinstance(track.tags, mutagen.mp4.MP4Tags):\n return get_track_info_mp4(filepath, track.tags, track.info, cover)\n if isinstance(track, mutagen.oggopus.OggOpus):\n return get_track_info_opus(filepath, track.tags, track.info, cover)\n raise ValueError(\"No parser for file format\")\n",
"import mutagen\nimport os\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\ndef get_tag(tags, name):\n return list(tags.get(name, []))\n\n\ndef sanitize_genres(genres):\n l = list()\n for genre in genres:\n for g in genre.split(','):\n l.append(g.strip())\n return l\n\n\ndef sanitize_year(year):\n if year is None:\n return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4:\n return int(year)\n return None\n\n\ndef sanitize_track(track):\n if track is None:\n return track\n if isinstance(track, tuple):\n return track[0]\n if '/' in track:\n return int(track.split('/')[0])\n return int(track)\n\n\ndef sanitize_disk(disk):\n if disk is None:\n return disk\n if isinstance(disk, tuple):\n return disk[0]\n if '/' in disk:\n return int(disk.split('/')[0])\n return int(disk)\n\n\ndef get_track_info_mp4(filepath, tags, stream, cover=None):\n \"\"\" Parses track information from mp4 file \"\"\"\n discogs = extract(tags.get('----:com.apple.iTunes:DISCOGS_RELEASE_ID'))\n if not cover:\n coverinfo = extract(tags.get('covr'))\n if coverinfo:\n if coverinfo.imageformat == mutagen.mp4.AtomDataType.JPEG:\n cover = os.path.dirname(filepath) + '/cover.jpg'\n elif coverinfo.imageformat == mutagen.mp4.AtomDataType.PNG:\n cover = os.path.dirname(filepath) + '/cover.png'\n if cover:\n f = open(cover, 'wb+')\n f.write(bytes(coverinfo))\n f.close()\n return {'title': extract(tags.get('©nam')), 'track': sanitize_track(\n extract(tags.get('trkn'))), 'artists': tags.get('©ART'),\n 'albumartist': extract(tags.get('aART')) or extract(tags.get('©ART'\n )), 'album': extract(tags.get('©alb')), 'discogs_id': bytes(discogs\n ).decode('utf-8') if discogs else None, 'musicbrainz_id': '',\n 'disk': sanitize_disk(extract(tags.get('disk'))), 'year':\n sanitize_year(extract(tags.get('©day'))), 'genres': sanitize_genres\n (tags.get('©gen')), 'length': stream.length, 'bitrate': stream.\n bitrate, 'size': os.path.getsize(filepath), 'cover': cover,\n 'filepath': filepath}\n\n\ndef get_track_info_mp3(filepath, tags, stream, cover):\n \"\"\" Parses track information from mp3 file \"\"\"\n tag = lambda t: get_tag(tags, t)\n discogs = extract(list(filter(lambda x: x.desc == 'DISCOGS_RELEASE_ID',\n tags.getall('TXXX'))))\n musicbrainz = extract(list(filter(lambda x: x.desc ==\n 'MusicBrainz Album Id', tags.getall('TXXX'))))\n if musicbrainz:\n musicbrainz = extract(musicbrainz.text)\n if not cover:\n coverinfo = tags.get('APIC:')\n if coverinfo:\n if coverinfo.mime == 'image/jpeg':\n cover = os.path.dirname(filepath) + '/cover.jpg'\n else:\n raise ValueError('Not supporting %s' % coverinfo.mime)\n if cover:\n f = open(cover, 'wb+')\n f.write(coverinfo.data)\n f.close()\n track = sanitize_track(extract(tag('TRCK')))\n date = tag('TDRC') or tag('TDAT') or tag('TYER')\n return {'title': extract(tag('TIT2')), 'track': track, 'artists': tag(\n 'TPE1'), 'albumartist': extract(tag('TPE2')) or extract(tags.get(\n 'TPE1')), 'album': extract(tag('TALB')), 'discogs_id': bytes(\n discogs).decode('utf-8') if discogs else None, 'musicbrainz_id':\n musicbrainz, 'disk': sanitize_disk(extract(tag('TPOS'))), 'year':\n sanitize_year(extract(date)), 'genres': sanitize_genres(tag('TCON')\n ), 'length': stream.length, 'bitrate': stream.bitrate, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': 
filepath}\n\n\ndef get_track_info_opus(filepath, tags, stream, cover):\n return {'title': extract(tags.get('TITLE')), 'track': extract(tags.get(\n 'TRACK')), 'artists': tags.get('ARTIST'), 'albumartist': extract(\n tags.get('ALBUMARTIST')), 'album': extract(tags.get('ALBUM')),\n 'musicbrainz_song_id': extract(tags.get('MUSICBRAINZ_TRACKID')),\n 'musicbrainz_album_id': extract(tags.get('MUSICBRAINZ_ALBUMID')),\n 'musicbrainz_albumartist_id': extract(tags.get(\n 'MUSICBRAINZ_ALBUMARTISTID')), 'year': extract(tags.get('YEAR')),\n 'genres': tags.get('GENRE'), 'length': stream.length, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': filepath}\n\n\nCOVERS = {}\n\n\ndef find_cover(folder):\n \"\"\" Find the cover file base on a folder \"\"\"\n if COVERS.get(folder) is None:\n for prefix in ['cover', 'Cover', 'Folder', 'folder']:\n for suffix in ['.png', '.jpg', '.jpeg']:\n f = os.path.join(folder, prefix + suffix)\n if os.path.isfile(f):\n COVERS[folder] = f\n return f\n return COVERS.get(folder)\n\n\ndef get_track_info(dirpath, f):\n \"\"\" Parses track information from mutagen \"\"\"\n filepath = os.path.join(dirpath, f)\n track = mutagen.File(filepath)\n if not track:\n if filepath.endswith('.mp3') or filepath.endswith('.m4a'):\n raise ValueError('Skipped an mp3 or an m4a')\n return None\n cover = find_cover(dirpath)\n if isinstance(track.tags, mutagen.id3.ID3):\n return get_track_info_mp3(filepath, track.tags, track.info, cover)\n if isinstance(track.tags, mutagen.mp4.MP4Tags):\n return get_track_info_mp4(filepath, track.tags, track.info, cover)\n if isinstance(track, mutagen.oggopus.OggOpus):\n return get_track_info_opus(filepath, track.tags, track.info, cover)\n raise ValueError('No parser for file format')\n",
"<import token>\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\ndef get_tag(tags, name):\n return list(tags.get(name, []))\n\n\ndef sanitize_genres(genres):\n l = list()\n for genre in genres:\n for g in genre.split(','):\n l.append(g.strip())\n return l\n\n\ndef sanitize_year(year):\n if year is None:\n return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4:\n return int(year)\n return None\n\n\ndef sanitize_track(track):\n if track is None:\n return track\n if isinstance(track, tuple):\n return track[0]\n if '/' in track:\n return int(track.split('/')[0])\n return int(track)\n\n\ndef sanitize_disk(disk):\n if disk is None:\n return disk\n if isinstance(disk, tuple):\n return disk[0]\n if '/' in disk:\n return int(disk.split('/')[0])\n return int(disk)\n\n\ndef get_track_info_mp4(filepath, tags, stream, cover=None):\n \"\"\" Parses track information from mp4 file \"\"\"\n discogs = extract(tags.get('----:com.apple.iTunes:DISCOGS_RELEASE_ID'))\n if not cover:\n coverinfo = extract(tags.get('covr'))\n if coverinfo:\n if coverinfo.imageformat == mutagen.mp4.AtomDataType.JPEG:\n cover = os.path.dirname(filepath) + '/cover.jpg'\n elif coverinfo.imageformat == mutagen.mp4.AtomDataType.PNG:\n cover = os.path.dirname(filepath) + '/cover.png'\n if cover:\n f = open(cover, 'wb+')\n f.write(bytes(coverinfo))\n f.close()\n return {'title': extract(tags.get('©nam')), 'track': sanitize_track(\n extract(tags.get('trkn'))), 'artists': tags.get('©ART'),\n 'albumartist': extract(tags.get('aART')) or extract(tags.get('©ART'\n )), 'album': extract(tags.get('©alb')), 'discogs_id': bytes(discogs\n ).decode('utf-8') if discogs else None, 'musicbrainz_id': '',\n 'disk': sanitize_disk(extract(tags.get('disk'))), 'year':\n sanitize_year(extract(tags.get('©day'))), 'genres': sanitize_genres\n (tags.get('©gen')), 'length': stream.length, 'bitrate': stream.\n bitrate, 'size': os.path.getsize(filepath), 'cover': cover,\n 'filepath': filepath}\n\n\ndef get_track_info_mp3(filepath, tags, stream, cover):\n \"\"\" Parses track information from mp3 file \"\"\"\n tag = lambda t: get_tag(tags, t)\n discogs = extract(list(filter(lambda x: x.desc == 'DISCOGS_RELEASE_ID',\n tags.getall('TXXX'))))\n musicbrainz = extract(list(filter(lambda x: x.desc ==\n 'MusicBrainz Album Id', tags.getall('TXXX'))))\n if musicbrainz:\n musicbrainz = extract(musicbrainz.text)\n if not cover:\n coverinfo = tags.get('APIC:')\n if coverinfo:\n if coverinfo.mime == 'image/jpeg':\n cover = os.path.dirname(filepath) + '/cover.jpg'\n else:\n raise ValueError('Not supporting %s' % coverinfo.mime)\n if cover:\n f = open(cover, 'wb+')\n f.write(coverinfo.data)\n f.close()\n track = sanitize_track(extract(tag('TRCK')))\n date = tag('TDRC') or tag('TDAT') or tag('TYER')\n return {'title': extract(tag('TIT2')), 'track': track, 'artists': tag(\n 'TPE1'), 'albumartist': extract(tag('TPE2')) or extract(tags.get(\n 'TPE1')), 'album': extract(tag('TALB')), 'discogs_id': bytes(\n discogs).decode('utf-8') if discogs else None, 'musicbrainz_id':\n musicbrainz, 'disk': sanitize_disk(extract(tag('TPOS'))), 'year':\n sanitize_year(extract(date)), 'genres': sanitize_genres(tag('TCON')\n ), 'length': stream.length, 'bitrate': stream.bitrate, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': 
filepath}\n\n\ndef get_track_info_opus(filepath, tags, stream, cover):\n return {'title': extract(tags.get('TITLE')), 'track': extract(tags.get(\n 'TRACK')), 'artists': tags.get('ARTIST'), 'albumartist': extract(\n tags.get('ALBUMARTIST')), 'album': extract(tags.get('ALBUM')),\n 'musicbrainz_song_id': extract(tags.get('MUSICBRAINZ_TRACKID')),\n 'musicbrainz_album_id': extract(tags.get('MUSICBRAINZ_ALBUMID')),\n 'musicbrainz_albumartist_id': extract(tags.get(\n 'MUSICBRAINZ_ALBUMARTISTID')), 'year': extract(tags.get('YEAR')),\n 'genres': tags.get('GENRE'), 'length': stream.length, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': filepath}\n\n\nCOVERS = {}\n\n\ndef find_cover(folder):\n \"\"\" Find the cover file base on a folder \"\"\"\n if COVERS.get(folder) is None:\n for prefix in ['cover', 'Cover', 'Folder', 'folder']:\n for suffix in ['.png', '.jpg', '.jpeg']:\n f = os.path.join(folder, prefix + suffix)\n if os.path.isfile(f):\n COVERS[folder] = f\n return f\n return COVERS.get(folder)\n\n\ndef get_track_info(dirpath, f):\n \"\"\" Parses track information from mutagen \"\"\"\n filepath = os.path.join(dirpath, f)\n track = mutagen.File(filepath)\n if not track:\n if filepath.endswith('.mp3') or filepath.endswith('.m4a'):\n raise ValueError('Skipped an mp3 or an m4a')\n return None\n cover = find_cover(dirpath)\n if isinstance(track.tags, mutagen.id3.ID3):\n return get_track_info_mp3(filepath, track.tags, track.info, cover)\n if isinstance(track.tags, mutagen.mp4.MP4Tags):\n return get_track_info_mp4(filepath, track.tags, track.info, cover)\n if isinstance(track, mutagen.oggopus.OggOpus):\n return get_track_info_opus(filepath, track.tags, track.info, cover)\n raise ValueError('No parser for file format')\n",
"<import token>\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\ndef get_tag(tags, name):\n return list(tags.get(name, []))\n\n\ndef sanitize_genres(genres):\n l = list()\n for genre in genres:\n for g in genre.split(','):\n l.append(g.strip())\n return l\n\n\ndef sanitize_year(year):\n if year is None:\n return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4:\n return int(year)\n return None\n\n\ndef sanitize_track(track):\n if track is None:\n return track\n if isinstance(track, tuple):\n return track[0]\n if '/' in track:\n return int(track.split('/')[0])\n return int(track)\n\n\ndef sanitize_disk(disk):\n if disk is None:\n return disk\n if isinstance(disk, tuple):\n return disk[0]\n if '/' in disk:\n return int(disk.split('/')[0])\n return int(disk)\n\n\ndef get_track_info_mp4(filepath, tags, stream, cover=None):\n \"\"\" Parses track information from mp4 file \"\"\"\n discogs = extract(tags.get('----:com.apple.iTunes:DISCOGS_RELEASE_ID'))\n if not cover:\n coverinfo = extract(tags.get('covr'))\n if coverinfo:\n if coverinfo.imageformat == mutagen.mp4.AtomDataType.JPEG:\n cover = os.path.dirname(filepath) + '/cover.jpg'\n elif coverinfo.imageformat == mutagen.mp4.AtomDataType.PNG:\n cover = os.path.dirname(filepath) + '/cover.png'\n if cover:\n f = open(cover, 'wb+')\n f.write(bytes(coverinfo))\n f.close()\n return {'title': extract(tags.get('©nam')), 'track': sanitize_track(\n extract(tags.get('trkn'))), 'artists': tags.get('©ART'),\n 'albumartist': extract(tags.get('aART')) or extract(tags.get('©ART'\n )), 'album': extract(tags.get('©alb')), 'discogs_id': bytes(discogs\n ).decode('utf-8') if discogs else None, 'musicbrainz_id': '',\n 'disk': sanitize_disk(extract(tags.get('disk'))), 'year':\n sanitize_year(extract(tags.get('©day'))), 'genres': sanitize_genres\n (tags.get('©gen')), 'length': stream.length, 'bitrate': stream.\n bitrate, 'size': os.path.getsize(filepath), 'cover': cover,\n 'filepath': filepath}\n\n\ndef get_track_info_mp3(filepath, tags, stream, cover):\n \"\"\" Parses track information from mp3 file \"\"\"\n tag = lambda t: get_tag(tags, t)\n discogs = extract(list(filter(lambda x: x.desc == 'DISCOGS_RELEASE_ID',\n tags.getall('TXXX'))))\n musicbrainz = extract(list(filter(lambda x: x.desc ==\n 'MusicBrainz Album Id', tags.getall('TXXX'))))\n if musicbrainz:\n musicbrainz = extract(musicbrainz.text)\n if not cover:\n coverinfo = tags.get('APIC:')\n if coverinfo:\n if coverinfo.mime == 'image/jpeg':\n cover = os.path.dirname(filepath) + '/cover.jpg'\n else:\n raise ValueError('Not supporting %s' % coverinfo.mime)\n if cover:\n f = open(cover, 'wb+')\n f.write(coverinfo.data)\n f.close()\n track = sanitize_track(extract(tag('TRCK')))\n date = tag('TDRC') or tag('TDAT') or tag('TYER')\n return {'title': extract(tag('TIT2')), 'track': track, 'artists': tag(\n 'TPE1'), 'albumartist': extract(tag('TPE2')) or extract(tags.get(\n 'TPE1')), 'album': extract(tag('TALB')), 'discogs_id': bytes(\n discogs).decode('utf-8') if discogs else None, 'musicbrainz_id':\n musicbrainz, 'disk': sanitize_disk(extract(tag('TPOS'))), 'year':\n sanitize_year(extract(date)), 'genres': sanitize_genres(tag('TCON')\n ), 'length': stream.length, 'bitrate': stream.bitrate, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': 
filepath}\n\n\ndef get_track_info_opus(filepath, tags, stream, cover):\n return {'title': extract(tags.get('TITLE')), 'track': extract(tags.get(\n 'TRACK')), 'artists': tags.get('ARTIST'), 'albumartist': extract(\n tags.get('ALBUMARTIST')), 'album': extract(tags.get('ALBUM')),\n 'musicbrainz_song_id': extract(tags.get('MUSICBRAINZ_TRACKID')),\n 'musicbrainz_album_id': extract(tags.get('MUSICBRAINZ_ALBUMID')),\n 'musicbrainz_albumartist_id': extract(tags.get(\n 'MUSICBRAINZ_ALBUMARTISTID')), 'year': extract(tags.get('YEAR')),\n 'genres': tags.get('GENRE'), 'length': stream.length, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': filepath}\n\n\n<assignment token>\n\n\ndef find_cover(folder):\n \"\"\" Find the cover file base on a folder \"\"\"\n if COVERS.get(folder) is None:\n for prefix in ['cover', 'Cover', 'Folder', 'folder']:\n for suffix in ['.png', '.jpg', '.jpeg']:\n f = os.path.join(folder, prefix + suffix)\n if os.path.isfile(f):\n COVERS[folder] = f\n return f\n return COVERS.get(folder)\n\n\ndef get_track_info(dirpath, f):\n \"\"\" Parses track information from mutagen \"\"\"\n filepath = os.path.join(dirpath, f)\n track = mutagen.File(filepath)\n if not track:\n if filepath.endswith('.mp3') or filepath.endswith('.m4a'):\n raise ValueError('Skipped an mp3 or an m4a')\n return None\n cover = find_cover(dirpath)\n if isinstance(track.tags, mutagen.id3.ID3):\n return get_track_info_mp3(filepath, track.tags, track.info, cover)\n if isinstance(track.tags, mutagen.mp4.MP4Tags):\n return get_track_info_mp4(filepath, track.tags, track.info, cover)\n if isinstance(track, mutagen.oggopus.OggOpus):\n return get_track_info_opus(filepath, track.tags, track.info, cover)\n raise ValueError('No parser for file format')\n",
"<import token>\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\ndef get_tag(tags, name):\n return list(tags.get(name, []))\n\n\ndef sanitize_genres(genres):\n l = list()\n for genre in genres:\n for g in genre.split(','):\n l.append(g.strip())\n return l\n\n\ndef sanitize_year(year):\n if year is None:\n return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4:\n return int(year)\n return None\n\n\ndef sanitize_track(track):\n if track is None:\n return track\n if isinstance(track, tuple):\n return track[0]\n if '/' in track:\n return int(track.split('/')[0])\n return int(track)\n\n\ndef sanitize_disk(disk):\n if disk is None:\n return disk\n if isinstance(disk, tuple):\n return disk[0]\n if '/' in disk:\n return int(disk.split('/')[0])\n return int(disk)\n\n\ndef get_track_info_mp4(filepath, tags, stream, cover=None):\n \"\"\" Parses track information from mp4 file \"\"\"\n discogs = extract(tags.get('----:com.apple.iTunes:DISCOGS_RELEASE_ID'))\n if not cover:\n coverinfo = extract(tags.get('covr'))\n if coverinfo:\n if coverinfo.imageformat == mutagen.mp4.AtomDataType.JPEG:\n cover = os.path.dirname(filepath) + '/cover.jpg'\n elif coverinfo.imageformat == mutagen.mp4.AtomDataType.PNG:\n cover = os.path.dirname(filepath) + '/cover.png'\n if cover:\n f = open(cover, 'wb+')\n f.write(bytes(coverinfo))\n f.close()\n return {'title': extract(tags.get('©nam')), 'track': sanitize_track(\n extract(tags.get('trkn'))), 'artists': tags.get('©ART'),\n 'albumartist': extract(tags.get('aART')) or extract(tags.get('©ART'\n )), 'album': extract(tags.get('©alb')), 'discogs_id': bytes(discogs\n ).decode('utf-8') if discogs else None, 'musicbrainz_id': '',\n 'disk': sanitize_disk(extract(tags.get('disk'))), 'year':\n sanitize_year(extract(tags.get('©day'))), 'genres': sanitize_genres\n (tags.get('©gen')), 'length': stream.length, 'bitrate': stream.\n bitrate, 'size': os.path.getsize(filepath), 'cover': cover,\n 'filepath': filepath}\n\n\ndef get_track_info_mp3(filepath, tags, stream, cover):\n \"\"\" Parses track information from mp3 file \"\"\"\n tag = lambda t: get_tag(tags, t)\n discogs = extract(list(filter(lambda x: x.desc == 'DISCOGS_RELEASE_ID',\n tags.getall('TXXX'))))\n musicbrainz = extract(list(filter(lambda x: x.desc ==\n 'MusicBrainz Album Id', tags.getall('TXXX'))))\n if musicbrainz:\n musicbrainz = extract(musicbrainz.text)\n if not cover:\n coverinfo = tags.get('APIC:')\n if coverinfo:\n if coverinfo.mime == 'image/jpeg':\n cover = os.path.dirname(filepath) + '/cover.jpg'\n else:\n raise ValueError('Not supporting %s' % coverinfo.mime)\n if cover:\n f = open(cover, 'wb+')\n f.write(coverinfo.data)\n f.close()\n track = sanitize_track(extract(tag('TRCK')))\n date = tag('TDRC') or tag('TDAT') or tag('TYER')\n return {'title': extract(tag('TIT2')), 'track': track, 'artists': tag(\n 'TPE1'), 'albumartist': extract(tag('TPE2')) or extract(tags.get(\n 'TPE1')), 'album': extract(tag('TALB')), 'discogs_id': bytes(\n discogs).decode('utf-8') if discogs else None, 'musicbrainz_id':\n musicbrainz, 'disk': sanitize_disk(extract(tag('TPOS'))), 'year':\n sanitize_year(extract(date)), 'genres': sanitize_genres(tag('TCON')\n ), 'length': stream.length, 'bitrate': stream.bitrate, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': 
filepath}\n\n\ndef get_track_info_opus(filepath, tags, stream, cover):\n return {'title': extract(tags.get('TITLE')), 'track': extract(tags.get(\n 'TRACK')), 'artists': tags.get('ARTIST'), 'albumartist': extract(\n tags.get('ALBUMARTIST')), 'album': extract(tags.get('ALBUM')),\n 'musicbrainz_song_id': extract(tags.get('MUSICBRAINZ_TRACKID')),\n 'musicbrainz_album_id': extract(tags.get('MUSICBRAINZ_ALBUMID')),\n 'musicbrainz_albumartist_id': extract(tags.get(\n 'MUSICBRAINZ_ALBUMARTISTID')), 'year': extract(tags.get('YEAR')),\n 'genres': tags.get('GENRE'), 'length': stream.length, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': filepath}\n\n\n<assignment token>\n\n\ndef find_cover(folder):\n \"\"\" Find the cover file base on a folder \"\"\"\n if COVERS.get(folder) is None:\n for prefix in ['cover', 'Cover', 'Folder', 'folder']:\n for suffix in ['.png', '.jpg', '.jpeg']:\n f = os.path.join(folder, prefix + suffix)\n if os.path.isfile(f):\n COVERS[folder] = f\n return f\n return COVERS.get(folder)\n\n\n<function token>\n",
"<import token>\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\ndef get_tag(tags, name):\n return list(tags.get(name, []))\n\n\ndef sanitize_genres(genres):\n l = list()\n for genre in genres:\n for g in genre.split(','):\n l.append(g.strip())\n return l\n\n\ndef sanitize_year(year):\n if year is None:\n return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4:\n return int(year)\n return None\n\n\ndef sanitize_track(track):\n if track is None:\n return track\n if isinstance(track, tuple):\n return track[0]\n if '/' in track:\n return int(track.split('/')[0])\n return int(track)\n\n\ndef sanitize_disk(disk):\n if disk is None:\n return disk\n if isinstance(disk, tuple):\n return disk[0]\n if '/' in disk:\n return int(disk.split('/')[0])\n return int(disk)\n\n\n<function token>\n\n\ndef get_track_info_mp3(filepath, tags, stream, cover):\n \"\"\" Parses track information from mp3 file \"\"\"\n tag = lambda t: get_tag(tags, t)\n discogs = extract(list(filter(lambda x: x.desc == 'DISCOGS_RELEASE_ID',\n tags.getall('TXXX'))))\n musicbrainz = extract(list(filter(lambda x: x.desc ==\n 'MusicBrainz Album Id', tags.getall('TXXX'))))\n if musicbrainz:\n musicbrainz = extract(musicbrainz.text)\n if not cover:\n coverinfo = tags.get('APIC:')\n if coverinfo:\n if coverinfo.mime == 'image/jpeg':\n cover = os.path.dirname(filepath) + '/cover.jpg'\n else:\n raise ValueError('Not supporting %s' % coverinfo.mime)\n if cover:\n f = open(cover, 'wb+')\n f.write(coverinfo.data)\n f.close()\n track = sanitize_track(extract(tag('TRCK')))\n date = tag('TDRC') or tag('TDAT') or tag('TYER')\n return {'title': extract(tag('TIT2')), 'track': track, 'artists': tag(\n 'TPE1'), 'albumartist': extract(tag('TPE2')) or extract(tags.get(\n 'TPE1')), 'album': extract(tag('TALB')), 'discogs_id': bytes(\n discogs).decode('utf-8') if discogs else None, 'musicbrainz_id':\n musicbrainz, 'disk': sanitize_disk(extract(tag('TPOS'))), 'year':\n sanitize_year(extract(date)), 'genres': sanitize_genres(tag('TCON')\n ), 'length': stream.length, 'bitrate': stream.bitrate, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': filepath}\n\n\ndef get_track_info_opus(filepath, tags, stream, cover):\n return {'title': extract(tags.get('TITLE')), 'track': extract(tags.get(\n 'TRACK')), 'artists': tags.get('ARTIST'), 'albumartist': extract(\n tags.get('ALBUMARTIST')), 'album': extract(tags.get('ALBUM')),\n 'musicbrainz_song_id': extract(tags.get('MUSICBRAINZ_TRACKID')),\n 'musicbrainz_album_id': extract(tags.get('MUSICBRAINZ_ALBUMID')),\n 'musicbrainz_albumartist_id': extract(tags.get(\n 'MUSICBRAINZ_ALBUMARTISTID')), 'year': extract(tags.get('YEAR')),\n 'genres': tags.get('GENRE'), 'length': stream.length, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': filepath}\n\n\n<assignment token>\n\n\ndef find_cover(folder):\n \"\"\" Find the cover file base on a folder \"\"\"\n if COVERS.get(folder) is None:\n for prefix in ['cover', 'Cover', 'Folder', 'folder']:\n for suffix in ['.png', '.jpg', '.jpeg']:\n f = os.path.join(folder, prefix + suffix)\n if os.path.isfile(f):\n COVERS[folder] = f\n return f\n return COVERS.get(folder)\n\n\n<function token>\n",
"<import token>\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\ndef get_tag(tags, name):\n return list(tags.get(name, []))\n\n\ndef sanitize_genres(genres):\n l = list()\n for genre in genres:\n for g in genre.split(','):\n l.append(g.strip())\n return l\n\n\ndef sanitize_year(year):\n if year is None:\n return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4:\n return int(year)\n return None\n\n\ndef sanitize_track(track):\n if track is None:\n return track\n if isinstance(track, tuple):\n return track[0]\n if '/' in track:\n return int(track.split('/')[0])\n return int(track)\n\n\n<function token>\n<function token>\n\n\ndef get_track_info_mp3(filepath, tags, stream, cover):\n \"\"\" Parses track information from mp3 file \"\"\"\n tag = lambda t: get_tag(tags, t)\n discogs = extract(list(filter(lambda x: x.desc == 'DISCOGS_RELEASE_ID',\n tags.getall('TXXX'))))\n musicbrainz = extract(list(filter(lambda x: x.desc ==\n 'MusicBrainz Album Id', tags.getall('TXXX'))))\n if musicbrainz:\n musicbrainz = extract(musicbrainz.text)\n if not cover:\n coverinfo = tags.get('APIC:')\n if coverinfo:\n if coverinfo.mime == 'image/jpeg':\n cover = os.path.dirname(filepath) + '/cover.jpg'\n else:\n raise ValueError('Not supporting %s' % coverinfo.mime)\n if cover:\n f = open(cover, 'wb+')\n f.write(coverinfo.data)\n f.close()\n track = sanitize_track(extract(tag('TRCK')))\n date = tag('TDRC') or tag('TDAT') or tag('TYER')\n return {'title': extract(tag('TIT2')), 'track': track, 'artists': tag(\n 'TPE1'), 'albumartist': extract(tag('TPE2')) or extract(tags.get(\n 'TPE1')), 'album': extract(tag('TALB')), 'discogs_id': bytes(\n discogs).decode('utf-8') if discogs else None, 'musicbrainz_id':\n musicbrainz, 'disk': sanitize_disk(extract(tag('TPOS'))), 'year':\n sanitize_year(extract(date)), 'genres': sanitize_genres(tag('TCON')\n ), 'length': stream.length, 'bitrate': stream.bitrate, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': filepath}\n\n\ndef get_track_info_opus(filepath, tags, stream, cover):\n return {'title': extract(tags.get('TITLE')), 'track': extract(tags.get(\n 'TRACK')), 'artists': tags.get('ARTIST'), 'albumartist': extract(\n tags.get('ALBUMARTIST')), 'album': extract(tags.get('ALBUM')),\n 'musicbrainz_song_id': extract(tags.get('MUSICBRAINZ_TRACKID')),\n 'musicbrainz_album_id': extract(tags.get('MUSICBRAINZ_ALBUMID')),\n 'musicbrainz_albumartist_id': extract(tags.get(\n 'MUSICBRAINZ_ALBUMARTISTID')), 'year': extract(tags.get('YEAR')),\n 'genres': tags.get('GENRE'), 'length': stream.length, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': filepath}\n\n\n<assignment token>\n\n\ndef find_cover(folder):\n \"\"\" Find the cover file base on a folder \"\"\"\n if COVERS.get(folder) is None:\n for prefix in ['cover', 'Cover', 'Folder', 'folder']:\n for suffix in ['.png', '.jpg', '.jpeg']:\n f = os.path.join(folder, prefix + suffix)\n if os.path.isfile(f):\n COVERS[folder] = f\n return f\n return COVERS.get(folder)\n\n\n<function token>\n",
"<import token>\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\ndef get_tag(tags, name):\n return list(tags.get(name, []))\n\n\ndef sanitize_genres(genres):\n l = list()\n for genre in genres:\n for g in genre.split(','):\n l.append(g.strip())\n return l\n\n\ndef sanitize_year(year):\n if year is None:\n return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4:\n return int(year)\n return None\n\n\ndef sanitize_track(track):\n if track is None:\n return track\n if isinstance(track, tuple):\n return track[0]\n if '/' in track:\n return int(track.split('/')[0])\n return int(track)\n\n\n<function token>\n<function token>\n\n\ndef get_track_info_mp3(filepath, tags, stream, cover):\n \"\"\" Parses track information from mp3 file \"\"\"\n tag = lambda t: get_tag(tags, t)\n discogs = extract(list(filter(lambda x: x.desc == 'DISCOGS_RELEASE_ID',\n tags.getall('TXXX'))))\n musicbrainz = extract(list(filter(lambda x: x.desc ==\n 'MusicBrainz Album Id', tags.getall('TXXX'))))\n if musicbrainz:\n musicbrainz = extract(musicbrainz.text)\n if not cover:\n coverinfo = tags.get('APIC:')\n if coverinfo:\n if coverinfo.mime == 'image/jpeg':\n cover = os.path.dirname(filepath) + '/cover.jpg'\n else:\n raise ValueError('Not supporting %s' % coverinfo.mime)\n if cover:\n f = open(cover, 'wb+')\n f.write(coverinfo.data)\n f.close()\n track = sanitize_track(extract(tag('TRCK')))\n date = tag('TDRC') or tag('TDAT') or tag('TYER')\n return {'title': extract(tag('TIT2')), 'track': track, 'artists': tag(\n 'TPE1'), 'albumartist': extract(tag('TPE2')) or extract(tags.get(\n 'TPE1')), 'album': extract(tag('TALB')), 'discogs_id': bytes(\n discogs).decode('utf-8') if discogs else None, 'musicbrainz_id':\n musicbrainz, 'disk': sanitize_disk(extract(tag('TPOS'))), 'year':\n sanitize_year(extract(date)), 'genres': sanitize_genres(tag('TCON')\n ), 'length': stream.length, 'bitrate': stream.bitrate, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': filepath}\n\n\n<function token>\n<assignment token>\n\n\ndef find_cover(folder):\n \"\"\" Find the cover file base on a folder \"\"\"\n if COVERS.get(folder) is None:\n for prefix in ['cover', 'Cover', 'Folder', 'folder']:\n for suffix in ['.png', '.jpg', '.jpeg']:\n f = os.path.join(folder, prefix + suffix)\n if os.path.isfile(f):\n COVERS[folder] = f\n return f\n return COVERS.get(folder)\n\n\n<function token>\n",
"<import token>\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\n<function token>\n\n\ndef sanitize_genres(genres):\n l = list()\n for genre in genres:\n for g in genre.split(','):\n l.append(g.strip())\n return l\n\n\ndef sanitize_year(year):\n if year is None:\n return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4:\n return int(year)\n return None\n\n\ndef sanitize_track(track):\n if track is None:\n return track\n if isinstance(track, tuple):\n return track[0]\n if '/' in track:\n return int(track.split('/')[0])\n return int(track)\n\n\n<function token>\n<function token>\n\n\ndef get_track_info_mp3(filepath, tags, stream, cover):\n \"\"\" Parses track information from mp3 file \"\"\"\n tag = lambda t: get_tag(tags, t)\n discogs = extract(list(filter(lambda x: x.desc == 'DISCOGS_RELEASE_ID',\n tags.getall('TXXX'))))\n musicbrainz = extract(list(filter(lambda x: x.desc ==\n 'MusicBrainz Album Id', tags.getall('TXXX'))))\n if musicbrainz:\n musicbrainz = extract(musicbrainz.text)\n if not cover:\n coverinfo = tags.get('APIC:')\n if coverinfo:\n if coverinfo.mime == 'image/jpeg':\n cover = os.path.dirname(filepath) + '/cover.jpg'\n else:\n raise ValueError('Not supporting %s' % coverinfo.mime)\n if cover:\n f = open(cover, 'wb+')\n f.write(coverinfo.data)\n f.close()\n track = sanitize_track(extract(tag('TRCK')))\n date = tag('TDRC') or tag('TDAT') or tag('TYER')\n return {'title': extract(tag('TIT2')), 'track': track, 'artists': tag(\n 'TPE1'), 'albumartist': extract(tag('TPE2')) or extract(tags.get(\n 'TPE1')), 'album': extract(tag('TALB')), 'discogs_id': bytes(\n discogs).decode('utf-8') if discogs else None, 'musicbrainz_id':\n musicbrainz, 'disk': sanitize_disk(extract(tag('TPOS'))), 'year':\n sanitize_year(extract(date)), 'genres': sanitize_genres(tag('TCON')\n ), 'length': stream.length, 'bitrate': stream.bitrate, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': filepath}\n\n\n<function token>\n<assignment token>\n\n\ndef find_cover(folder):\n \"\"\" Find the cover file base on a folder \"\"\"\n if COVERS.get(folder) is None:\n for prefix in ['cover', 'Cover', 'Folder', 'folder']:\n for suffix in ['.png', '.jpg', '.jpeg']:\n f = os.path.join(folder, prefix + suffix)\n if os.path.isfile(f):\n COVERS[folder] = f\n return f\n return COVERS.get(folder)\n\n\n<function token>\n",
"<import token>\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\n<function token>\n\n\ndef sanitize_genres(genres):\n l = list()\n for genre in genres:\n for g in genre.split(','):\n l.append(g.strip())\n return l\n\n\ndef sanitize_year(year):\n if year is None:\n return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4:\n return int(year)\n return None\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_track_info_mp3(filepath, tags, stream, cover):\n \"\"\" Parses track information from mp3 file \"\"\"\n tag = lambda t: get_tag(tags, t)\n discogs = extract(list(filter(lambda x: x.desc == 'DISCOGS_RELEASE_ID',\n tags.getall('TXXX'))))\n musicbrainz = extract(list(filter(lambda x: x.desc ==\n 'MusicBrainz Album Id', tags.getall('TXXX'))))\n if musicbrainz:\n musicbrainz = extract(musicbrainz.text)\n if not cover:\n coverinfo = tags.get('APIC:')\n if coverinfo:\n if coverinfo.mime == 'image/jpeg':\n cover = os.path.dirname(filepath) + '/cover.jpg'\n else:\n raise ValueError('Not supporting %s' % coverinfo.mime)\n if cover:\n f = open(cover, 'wb+')\n f.write(coverinfo.data)\n f.close()\n track = sanitize_track(extract(tag('TRCK')))\n date = tag('TDRC') or tag('TDAT') or tag('TYER')\n return {'title': extract(tag('TIT2')), 'track': track, 'artists': tag(\n 'TPE1'), 'albumartist': extract(tag('TPE2')) or extract(tags.get(\n 'TPE1')), 'album': extract(tag('TALB')), 'discogs_id': bytes(\n discogs).decode('utf-8') if discogs else None, 'musicbrainz_id':\n musicbrainz, 'disk': sanitize_disk(extract(tag('TPOS'))), 'year':\n sanitize_year(extract(date)), 'genres': sanitize_genres(tag('TCON')\n ), 'length': stream.length, 'bitrate': stream.bitrate, 'size': os.\n path.getsize(filepath), 'cover': cover, 'filepath': filepath}\n\n\n<function token>\n<assignment token>\n\n\ndef find_cover(folder):\n \"\"\" Find the cover file base on a folder \"\"\"\n if COVERS.get(folder) is None:\n for prefix in ['cover', 'Cover', 'Folder', 'folder']:\n for suffix in ['.png', '.jpg', '.jpeg']:\n f = os.path.join(folder, prefix + suffix)\n if os.path.isfile(f):\n COVERS[folder] = f\n return f\n return COVERS.get(folder)\n\n\n<function token>\n",
"<import token>\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\n<function token>\n\n\ndef sanitize_genres(genres):\n l = list()\n for genre in genres:\n for g in genre.split(','):\n l.append(g.strip())\n return l\n\n\ndef sanitize_year(year):\n if year is None:\n return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4:\n return int(year)\n return None\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n\n\ndef find_cover(folder):\n \"\"\" Find the cover file base on a folder \"\"\"\n if COVERS.get(folder) is None:\n for prefix in ['cover', 'Cover', 'Folder', 'folder']:\n for suffix in ['.png', '.jpg', '.jpeg']:\n f = os.path.join(folder, prefix + suffix)\n if os.path.isfile(f):\n COVERS[folder] = f\n return f\n return COVERS.get(folder)\n\n\n<function token>\n",
"<import token>\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\n<function token>\n<function token>\n\n\ndef sanitize_year(year):\n if year is None:\n return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4:\n return int(year)\n return None\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n\n\ndef find_cover(folder):\n \"\"\" Find the cover file base on a folder \"\"\"\n if COVERS.get(folder) is None:\n for prefix in ['cover', 'Cover', 'Folder', 'folder']:\n for suffix in ['.png', '.jpg', '.jpeg']:\n f = os.path.join(folder, prefix + suffix)\n if os.path.isfile(f):\n COVERS[folder] = f\n return f\n return COVERS.get(folder)\n\n\n<function token>\n",
"<import token>\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\n<function token>\n<function token>\n\n\ndef sanitize_year(year):\n if year is None:\n return year\n if isinstance(year, mutagen.id3.ID3TimeStamp):\n return year.year\n if len(year) == 4:\n return int(year)\n return None\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef extract(l):\n \"\"\" Extract the first element of a list, raise an error if more than 1 elem \"\"\"\n if l is None:\n return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<function token>\n<function token>\n"
] | false |
99,529 |
e9d1b152476dc413ce851133bfe628341fa5b175
|
from math import ceil
if __name__ == "__main__":
t = int(input())
for _ in range(t):
a, b = map(int, input().split())
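        # hedged reading of the task: each piece seems to claim a 3-cell stretch of the inner (a-2) x (b-2) grid, hence one ceiling division per axis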
print(ceil((a - 2) / 3.0) * ceil((b - 2) / 3.0))
|
[
"from math import ceil\n\nif __name__ == \"__main__\":\n t = int(input())\n\n for _ in range(t):\n a, b = map(int, input().split())\n print(ceil((a - 2) / 3.0) * ceil((b - 2) / 3.0))",
"from math import ceil\nif __name__ == '__main__':\n t = int(input())\n for _ in range(t):\n a, b = map(int, input().split())\n print(ceil((a - 2) / 3.0) * ceil((b - 2) / 3.0))\n",
"<import token>\nif __name__ == '__main__':\n t = int(input())\n for _ in range(t):\n a, b = map(int, input().split())\n print(ceil((a - 2) / 3.0) * ceil((b - 2) / 3.0))\n",
"<import token>\n<code token>\n"
] | false |
99,530 |
22da550ef268e582ff3f2a3c45fd48bcf22b6e36
|
import argparse
import os
import glob
import time
import sys
sys.path.insert(0,'./utils')
from globalVariables import ret_dict,data,res_dict,LABELS_SWORD_COL,_2stream
CHEKPOINT = "./checkpoints"
WEIGHTS = "weights"
LABELS = "classes"
# settings for WampServer
php_webservice = "http://localhost/combine/webservices.php"
wamp_folder = 'C:/wamp64/www/combine/'
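# Resolve a named sign-language system under ./checkpoints: its per-stream .h5 weights and its labels CSV.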
def get_sys_info(sys_name):
	# find which sign-language system folder has been chosen.
systems = glob.glob(os.path.join(CHEKPOINT,'*'))
systems = list(map(lambda s: s.rsplit(f'{os.sep}',1)[-1],systems))
if not sys_name in systems or len(systems) == 0:
raise ValueError(f"ERROR : could not find {sys_name} in {CHEKPOINT} directory.")
sys_path = os.path.join(CHEKPOINT,sys_name)
# get weights.
sys_weights = glob.glob(os.path.join(sys_path,WEIGHTS,'*.h5'))
if len(sys_weights) == 0:
raise ValueError(f"ERROR : no weights has been found in {WEIGHTS} folder.")
# find rgb,oflow,lstm,lstm_cpu
h5_files = ['rgb','oflow','lstm','cpu']
h5_dirs = {}
for h5_file in h5_files:
h5_dir = [weights for weights in sys_weights if h5_file in weights.lower()]
if len(h5_dir) > 1:
raise ValueError(f"ERROR : In {h5_dir[0].rsplit(os.sep,1)[0]} directory more than one {h5_file} file found.")
h5_dirs[h5_file] = h5_dir[0] if len(h5_dir) > 0 else None
# get labels file
sys_labels = glob.glob(os.path.join(sys_path,LABELS,'*.csv'))
if len(sys_labels) != 1:
raise ValueError(f"ERROR : something wrong with {LABELS} folder.")
return h5_dirs,sys_labels[0]
def print_sys_info(args):
print("running the system with:")
for arg in vars(args):
print(' '*3,f'{arg} = {getattr(args,arg)}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# --run
parser.add_argument(
'-run',
'--run',
dest='run_method',
type=str,
default='webcam',
help='choose a way to test the sign language system.')
parser.add_argument(
'-sys',
'--system',
dest='system_name',
type=str,
default='turkish_10_word',
help='choose which sign language system to run.')
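	# NOTE: argparse's type=bool is truthy for any non-empty string, so e.g. '-use_lstm False' still parses as True;
	# these boolean flags effectively only take effect through their defaults.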
parser.add_argument(
'-use_lstm',
'--use_lstm',
dest='use_lstm',
type=bool,
default=False,
help='add lstm on top of stream network.')
parser.add_argument(
'-rgb',
'--rgb_only',
dest='use_rgb',
type=bool,
default=True,
help='just use rgb stream.')
parser.add_argument(
'-oflow',
'--oflow_only',
dest='use_oflow',
type=bool,
default=False,
help='just use optical flow stream.')
parser.add_argument(
'-on_cpu',
'--use_cpu',
dest='on_cpu',
type=bool,
default=True,
help='run the system on cpu.')
parser.add_argument(
'-pred_type',
'--prediction_type',
dest='pred_type',
type=str,
default='word',
help='define how the system output will be, either word or sentence.')
parser.add_argument(
'-nTop',
'--top_predictions',
dest='nTop',
type=int,
default=3,
help='how many result(output) should the system give.')
parser.add_argument(
'-download',
'--download',
dest='download',
type=bool,
default=False,
help='download weights and classes to checkpoints directory.')
parser.add_argument(
'-mul_oflow',
'--multiprocessing_opticalflow',
dest='mul_oflow',
type=bool,
default=False,
help="faster optical flow calculation with multiprocessing.")
parser.add_argument(
'-oflow_pnum',
'--oflow_process_num',
dest='oflow_pnum',
type=int,
default=4,
help="number of processes to calculate optical flow.")
parser.add_argument(
'-mul_2stream',
'--multiprocessing_two_stream',
dest='mul_2stream',
type=bool,
default=False,
help='run two stream on different processes.')
# CPU OR GPU
# HOW MUCH FRACTION ON GPU DO YOU WANT TO USE
# WHICH GPU TO RUN ON
# WORDS OR SENTENCES
# SINGLE CPU OR MULTIPULE
# use just rgb or just oflow
# don't use lstm
args = parser.parse_args()
# run test script
run_method = args.run_method
use_lstm = args.use_lstm
use_rgb = args.use_rgb
use_oflow = args.use_oflow
on_cpu = args.on_cpu
pred_type = args.pred_type
nTop = args.nTop
download = args.download
mul_oflow = args.mul_oflow
oflow_pnum = args.oflow_pnum
mul_2stream = args.mul_2stream
system_name = args.system_name
# download model weights and labels
if download:
from checkpoints.download import download_sys
Dir = CHEKPOINT+os.sep+system_name
print(f"downloading weights and lables for {system_name} system to {Dir}.")
download_sys(system_name,Dir)
#load checkpoints and labels
models_dir,labels_dir = get_sys_info(system_name)
# informative message
print(f"In {args.system_name} folder:")
for k,v in models_dir.items():
if v is not None:
# informative message
print(f"{' '*4}{k.upper()} WEIGHTS found : {v.rsplit(os.sep,1)[-1]}")
# informative message
print(f"{' '*4}labels : {labels_dir.rsplit(os.sep,1)[-1]}")
# make sure that flags are set properlly
if use_rgb and use_oflow:
raise ValueError("""ERROR : both rgb and oflow flags are on.
			trying to use both? set both flags to 'False'""")
if not pred_type == "word" and not pred_type == "sentence":
raise ValueError("ERROR : pred_type should be 'word' or 'sentence'")
con = mul_oflow and not oflow_pnum > 0
#notcon = not mul_oflow and oflow_pnum > 0
if con:
raise ValueError("ERROR : check mul_oflow and oflow_pnum flags.")
if not on_cpu and mul_2stream:
raise ValueError("ERROR : you can't use multiprocessing on streams while the system is running on gpu.")
if (use_rgb or use_oflow) and mul_2stream:
raise ValueError("ERROR : you can't do multiprocessing while using just one stream!.")
# print informative messages for what will be used next
print_sys_info(args)
# create tmp dir
os.makedirs('./tmp', exist_ok=True)
if on_cpu:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from collections import defaultdict
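	# defaulting to None lets downstream code probe models['rgb'] / models['oflow'] without KeyErrors when a stream is disabled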
models = defaultdict(lambda : None)
from utils.util import load_models,csv_to_dict
from multiprocessing import Manager
from multiprocessing import Process
labels = csv_to_dict(labels_dir,LABELS_SWORD_COL)
if not mul_2stream:
# load labels
print(f"loading labels from {labels_dir}.")
labels = csv_to_dict(labels_dir,LABELS_SWORD_COL)
print(f"{len(labels)} word found in {labels_dir}")
# load models
uploading_time = time.time()
print("Initializing models")
models = load_models(models_dir,
on_cpu,
use_rgb,
use_oflow,
use_lstm,
False)
print(f"Uploading took {round(time.time()-uploading_time,2)} sec")
else:
models['oflow'] = 1
from utils.parallel_streams import nn_work
_2stream.append(Process(target=nn_work, args=('oflow',models_dir,labels_dir,pred_type,nTop,mul_oflow,oflow_pnum)))
_2stream.append(Process(target=nn_work, args=('rgb',models_dir,labels_dir,pred_type,nTop,mul_oflow,oflow_pnum)))
if use_lstm:
			# assumption: this third worker should run the LSTM head; the original passed 'oflow' again, which reads like a copy-paste slip
			_2stream.append(Process(target=nn_work, args=('lstm',models_dir,labels_dir,pred_type,nTop,mul_oflow,oflow_pnum)))
for p in _2stream:
p.start()
print(f"{len(_2stream)} process has been initialized.")
# run some server with flags cpu gpu pred_type nTop
# if wamp
if run_method == "wamp":
print("running wamp server.")
from run.wamp import run_server
if not os.path.exists(wamp_folder):
raise ValueError(f"ERROR : can't find wamp service in {wamp_folder} directory")
# running wamp server
run_server(php_webservice,
wamp_folder,
models,
labels,
pred_type,
nTop,
mul_oflow,
oflow_pnum,
mul_2stream)
elif run_method == "webcam":
print("testing system on webcam, to close webcam press 'q'.")
from run.webcam import test
test(models,
labels,
pred_type,
nTop,
mul_oflow,
oflow_pnum,
mul_2stream)
elif run_method == "REST_API":
print("Initiate REST API server ...")
from run.REST_API import server
server.run(models,
labels,
pred_type,
nTop,
mul_oflow,
oflow_pnum,
mul_2stream,
host="0.0.0.0")
|
[
"import argparse\nimport os \nimport glob\nimport time \nimport sys\n\n\nsys.path.insert(0,'./utils')\nfrom globalVariables import ret_dict,data,res_dict,LABELS_SWORD_COL,_2stream\n\nCHEKPOINT = \"./checkpoints\"\nWEIGHTS = \"weights\"\nLABELS = \"classes\"\n\n# settings for WampServer \nphp_webservice = \"http://localhost/combine/webservices.php\"\nwamp_folder = 'C:/wamp64/www/combine/'\n\n\ndef get_sys_info(sys_name):\n\t\n\trgb_dir = None\n\toflow_dir = None\n\tlstm_dir = None\n\tlabels = None\n\n\t# find which words folder been chosen.\n\tsystems = glob.glob(os.path.join(CHEKPOINT,'*'))\t\n\tsystems = list(map(lambda s: s.rsplit(f'{os.sep}',1)[-1],systems))\n\n\tif not sys_name in systems or len(systems) == 0:\n\t\traise ValueError(f\"ERROR : could not find {sys_name} in {CHEKPOINT} directory.\")\n\n\tsys_path = os.path.join(CHEKPOINT,sys_name)\n\n\t# get weights.\n\tsys_weights = glob.glob(os.path.join(sys_path,WEIGHTS,'*.h5'))\n\n\tif len(sys_weights) == 0:\n\t\traise ValueError(f\"ERROR : no weights has been found in {WEIGHTS} folder.\")\n\n\t# find rgb,oflow,lstm,lstm_cpu\n\th5_files = ['rgb','oflow','lstm','cpu']\n\th5_dirs = {}\n\tfor h5_file in h5_files:\n\n\t\th5_dir = [weights for weights in sys_weights if h5_file in weights.lower()]\n\t\tif len(h5_dir) > 1:\n\t\t\traise ValueError(f\"ERROR : In {h5_dir[0].rsplit(os.sep,1)[0]} directory more than one {h5_file} file found.\")\n\t\t\n\t\th5_dirs[h5_file] = h5_dir[0] if len(h5_dir) > 0 else None\n\n\t# get labels file\n\tsys_labels = glob.glob(os.path.join(sys_path,LABELS,'*.csv'))\n\n\n\tif len(sys_labels) != 1:\n\t\traise ValueError(f\"ERROR : something wrong with {LABELS} folder.\")\n\n\treturn h5_dirs,sys_labels[0]\t\n\ndef print_sys_info(args):\n\n\tprint(\"running the system with:\")\n\tfor arg in vars(args):\n\t\tprint(' '*3,f'{arg} = {getattr(args,arg)}')\n\nif __name__ == '__main__' :\n\n\tparser = argparse.ArgumentParser()\n\n\t# --run \n\tparser.add_argument(\n\t\t'-run',\n\t\t'--run',\n\t\tdest='run_method',\n\t\ttype=str,\n\t\tdefault='webcam',\n\t\thelp='choose a way to test the sign language system.')\n\tparser.add_argument(\n\t\t'-sys',\n\t\t'--system',\n\t\tdest='system_name',\n\t\ttype=str,\n\t\tdefault='turkish_10_word',\n\t\thelp='choose which sign language system to run.')\n\tparser.add_argument(\n\t\t'-use_lstm',\n\t\t'--use_lstm',\n\t\tdest='use_lstm',\n\t\ttype=bool,\n\t\tdefault=False,\n\t\thelp='add lstm on top of stream network.')\n\tparser.add_argument(\n\t\t'-rgb',\n\t\t'--rgb_only',\n\t\tdest='use_rgb',\n\t\ttype=bool,\n\t\tdefault=True,\n\t\thelp='just use rgb stream.')\n\tparser.add_argument(\n\t\t'-oflow',\n\t\t'--oflow_only',\n\t\tdest='use_oflow',\n\t\ttype=bool,\n\t\tdefault=False,\n\t\thelp='just use optical flow stream.')\n\tparser.add_argument(\n\t\t'-on_cpu',\n\t\t'--use_cpu',\n\t\tdest='on_cpu',\n\t\ttype=bool,\n\t\tdefault=True,\n\t\thelp='run the system on cpu.')\n\tparser.add_argument(\n\t\t'-pred_type',\n\t\t'--prediction_type',\n\t\tdest='pred_type',\n\t\ttype=str,\n\t\tdefault='word',\n\t\thelp='define how the system output will be, either word or sentence.')\n\tparser.add_argument(\n\t\t'-nTop',\n\t\t'--top_predictions',\n\t\tdest='nTop',\n\t\ttype=int,\n\t\tdefault=3,\n\t\thelp='how many result(output) should the system give.')\n\tparser.add_argument(\n\t\t'-download',\n\t\t'--download',\n\t\tdest='download',\n\t\ttype=bool,\n\t\tdefault=False,\n\t\thelp='download weights and classes to checkpoints 
directory.')\n\tparser.add_argument(\n\t\t'-mul_oflow',\n\t\t'--multiprocessing_opticalflow',\n\t\tdest='mul_oflow',\n\t\ttype=bool,\n\t\tdefault=False,\n\t\thelp=\"faster optical flow calculation with multiprocessing.\")\n\tparser.add_argument(\n\t\t'-oflow_pnum',\n\t\t'--oflow_process_num',\n\t\tdest='oflow_pnum',\n\t\ttype=int,\n\t\tdefault=4,\n\t\thelp=\"number of processes to calculate optical flow.\")\n\tparser.add_argument(\n\t\t'-mul_2stream',\n\t\t'--multiprocessing_two_stream',\n\t\tdest='mul_2stream',\n\t\ttype=bool,\n\t\tdefault=False,\n\t\thelp='run two stream on different processes.')\n\t# CPU OR GPU\n\t# HOW MUCH FRACTION ON GPU DO YOU WANT TO USE \n\t# WHICH GPU TO RUN ON\n\t# WORDS OR SENTENCES\n\t# SINGLE CPU OR MULTIPULE\n\t# use just rgb or just oflow\n\t# don't use lstm\n\targs = parser.parse_args()\n\n\t# run test script \n\trun_method = args.run_method\n\tuse_lstm = args.use_lstm\n\tuse_rgb = args.use_rgb\t\n\tuse_oflow = args.use_oflow\n\ton_cpu = args.on_cpu\t\n\tpred_type = args.pred_type\n\tnTop = args.nTop\n\tdownload = args.download\n\tmul_oflow = args.mul_oflow\n\toflow_pnum = args.oflow_pnum\n\tmul_2stream = args.mul_2stream\n\tsystem_name = args.system_name\n\n\t# download model weights and labels\n\tif download:\n\t\tfrom checkpoints.download import download_sys\n\t\tDir = CHEKPOINT+os.sep+system_name\n\t\tprint(f\"downloading weights and lables for {system_name} system to {Dir}.\")\n\t\tdownload_sys(system_name,Dir)\n\n\t#load checkpoints and labels\n\tmodels_dir,labels_dir = get_sys_info(system_name)\n\t# informative message\n\tprint(f\"In {args.system_name} folder:\")\n\tfor k,v in models_dir.items():\n\t\tif v is not None:\n\t\t\t# informative message\n\t\t\tprint(f\"{' '*4}{k.upper()} WEIGHTS found : {v.rsplit(os.sep,1)[-1]}\")\n\t# informative message\n\tprint(f\"{' '*4}labels : {labels_dir.rsplit(os.sep,1)[-1]}\")\n\n\t\n\t# make sure that flags are set properlly\n\tif use_rgb and use_oflow:\n\t\traise ValueError(\"\"\"ERROR : both rgb and oflow flags are on.\n\t\t\t\t\t\t trying to use both? 
set both flag to 'False'\"\"\")\n\tif not pred_type == \"word\" and not pred_type == \"sentence\":\n\t\traise ValueError(\"ERROR : pred_type should be 'word' or 'sentence'\")\n\tcon = mul_oflow and not oflow_pnum > 0\n\t#notcon = not mul_oflow and oflow_pnum > 0 \n\tif con:\n\t\traise ValueError(\"ERROR : check mul_oflow and oflow_pnum flags.\")\n\tif not on_cpu and mul_2stream:\n\t\traise ValueError(\"ERROR : you can't use multiprocessing on streams while the system is running on gpu.\") \n\tif (use_rgb or use_oflow) and mul_2stream:\n\t\traise ValueError(\"ERROR : you can't do multiprocessing while using just one stream!.\")\n\t# print informative messages for what will be used next\n\tprint_sys_info(args) \n\n\t# create tmp dir\n\tos.makedirs('./tmp', exist_ok=True)\n\n\tif on_cpu:\n\t\tos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\n\t\tos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n\n\tfrom collections import defaultdict\n\n\tmodels = defaultdict(lambda : None)\n\n\tfrom utils.util import load_models,csv_to_dict\n\tfrom multiprocessing import Manager\n\tfrom multiprocessing import Process\n\n\tlabels = csv_to_dict(labels_dir,LABELS_SWORD_COL)\n\t\t\n\tif not mul_2stream:\n\n\t\t# load labels\n\t\tprint(f\"loading labels from {labels_dir}.\")\n\t\tlabels = csv_to_dict(labels_dir,LABELS_SWORD_COL)\n\t\tprint(f\"{len(labels)} word found in {labels_dir}\")\n\n\n\t\t# load models\n\t\tuploading_time = time.time()\n\t\tprint(\"Initializing models\")\n\t\tmodels = load_models(models_dir,\n\t\t\t\t\t\t\t\ton_cpu,\n\t\t\t\t\t\t\t\tuse_rgb,\n\t\t\t\t\t\t\t\tuse_oflow,\n\t\t\t\t\t\t\t\tuse_lstm,\n\t\t\t\t\t\t\t\tFalse)\n\t\tprint(f\"Uploading took {round(time.time()-uploading_time,2)} sec\")\n\telse:\n\t\tmodels['oflow'] = 1\n\t\tfrom utils.parallel_streams import nn_work\n\n\t\t_2stream.append(Process(target=nn_work, args=('oflow',models_dir,labels_dir,pred_type,nTop,mul_oflow,oflow_pnum)))\n\t\t_2stream.append(Process(target=nn_work, args=('rgb',models_dir,labels_dir,pred_type,nTop,mul_oflow,oflow_pnum)))\n\t\tif use_lstm:\n\t\t\t_2stream.append(Process(target=nn_work, args=('oflow',models_dir,labels_dir,pred_type,nTop,mul_oflow,oflow_pnum)))\n\n\t\tfor p in _2stream:\n\t\t\tp.start()\n\n\t\tprint(f\"{len(_2stream)} process has been initialized.\")\n\n\t# run some server with flags cpu gpu pred_type nTop\n\t# if wamp\n\tif run_method == \"wamp\":\n\t\tprint(\"running wamp server.\")\n\t\tfrom run.wamp import run_server \n\n\t\tif not os.path.exists(wamp_folder):\n\t\t\traise ValueError(f\"ERROR : can't find wamp service in {wamp_folder} directory\")\n\n\t\t# running wamp server\n\t\trun_server(php_webservice,\n\t\t\t\twamp_folder,\n\t\t\t\tmodels,\n\t\t\t\tlabels,\n\t\t\t\tpred_type,\n\t\t\t\tnTop,\n\t\t\t\tmul_oflow,\n\t\t\t\toflow_pnum,\n\t\t\t\tmul_2stream)\n\t\n\telif run_method == \"webcam\":\n\t\tprint(\"testing system on webcam, to close webcam press 'q'.\")\n\t\tfrom run.webcam import test\n\n\t\ttest(models,\n\t\t\tlabels,\n\t\t\tpred_type,\n\t\t\tnTop,\n\t\t\tmul_oflow,\n\t\t\toflow_pnum,\n\t\t\tmul_2stream)\n\n\telif run_method == \"REST_API\":\n\t\tprint(\"Initiate REST API server ...\")\n\t\tfrom run.REST_API import server\n\n\t\tserver.run(models,\n\t\t\t\t\tlabels,\n\t\t\t\t\tpred_type,\n\t\t\t\t\tnTop,\n\t\t\t\t\tmul_oflow,\n\t\t\t\t\toflow_pnum,\n\t\t\t\t\tmul_2stream,\n\t\t\t\t\thost=\"0.0.0.0\")\n",
"import argparse\nimport os\nimport glob\nimport time\nimport sys\nsys.path.insert(0, './utils')\nfrom globalVariables import ret_dict, data, res_dict, LABELS_SWORD_COL, _2stream\nCHEKPOINT = './checkpoints'\nWEIGHTS = 'weights'\nLABELS = 'classes'\nphp_webservice = 'http://localhost/combine/webservices.php'\nwamp_folder = 'C:/wamp64/www/combine/'\n\n\ndef get_sys_info(sys_name):\n rgb_dir = None\n oflow_dir = None\n lstm_dir = None\n labels = None\n systems = glob.glob(os.path.join(CHEKPOINT, '*'))\n systems = list(map(lambda s: s.rsplit(f'{os.sep}', 1)[-1], systems))\n if not sys_name in systems or len(systems) == 0:\n raise ValueError(\n f'ERROR : could not find {sys_name} in {CHEKPOINT} directory.')\n sys_path = os.path.join(CHEKPOINT, sys_name)\n sys_weights = glob.glob(os.path.join(sys_path, WEIGHTS, '*.h5'))\n if len(sys_weights) == 0:\n raise ValueError(\n f'ERROR : no weights has been found in {WEIGHTS} folder.')\n h5_files = ['rgb', 'oflow', 'lstm', 'cpu']\n h5_dirs = {}\n for h5_file in h5_files:\n h5_dir = [weights for weights in sys_weights if h5_file in weights.\n lower()]\n if len(h5_dir) > 1:\n raise ValueError(\n f'ERROR : In {h5_dir[0].rsplit(os.sep, 1)[0]} directory more than one {h5_file} file found.'\n )\n h5_dirs[h5_file] = h5_dir[0] if len(h5_dir) > 0 else None\n sys_labels = glob.glob(os.path.join(sys_path, LABELS, '*.csv'))\n if len(sys_labels) != 1:\n raise ValueError(f'ERROR : something wrong with {LABELS} folder.')\n return h5_dirs, sys_labels[0]\n\n\ndef print_sys_info(args):\n print('running the system with:')\n for arg in vars(args):\n print(' ' * 3, f'{arg} = {getattr(args, arg)}')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-run', '--run', dest='run_method', type=str,\n default='webcam', help='choose a way to test the sign language system.'\n )\n parser.add_argument('-sys', '--system', dest='system_name', type=str,\n default='turkish_10_word', help=\n 'choose which sign language system to run.')\n parser.add_argument('-use_lstm', '--use_lstm', dest='use_lstm', type=\n bool, default=False, help='add lstm on top of stream network.')\n parser.add_argument('-rgb', '--rgb_only', dest='use_rgb', type=bool,\n default=True, help='just use rgb stream.')\n parser.add_argument('-oflow', '--oflow_only', dest='use_oflow', type=\n bool, default=False, help='just use optical flow stream.')\n parser.add_argument('-on_cpu', '--use_cpu', dest='on_cpu', type=bool,\n default=True, help='run the system on cpu.')\n parser.add_argument('-pred_type', '--prediction_type', dest='pred_type',\n type=str, default='word', help=\n 'define how the system output will be, either word or sentence.')\n parser.add_argument('-nTop', '--top_predictions', dest='nTop', type=int,\n default=3, help='how many result(output) should the system give.')\n parser.add_argument('-download', '--download', dest='download', type=\n bool, default=False, help=\n 'download weights and classes to checkpoints directory.')\n parser.add_argument('-mul_oflow', '--multiprocessing_opticalflow', dest\n ='mul_oflow', type=bool, default=False, help=\n 'faster optical flow calculation with multiprocessing.')\n parser.add_argument('-oflow_pnum', '--oflow_process_num', dest=\n 'oflow_pnum', type=int, default=4, help=\n 'number of processes to calculate optical flow.')\n parser.add_argument('-mul_2stream', '--multiprocessing_two_stream',\n dest='mul_2stream', type=bool, default=False, help=\n 'run two stream on different processes.')\n args = parser.parse_args()\n run_method = 
args.run_method\n use_lstm = args.use_lstm\n use_rgb = args.use_rgb\n use_oflow = args.use_oflow\n on_cpu = args.on_cpu\n pred_type = args.pred_type\n nTop = args.nTop\n download = args.download\n mul_oflow = args.mul_oflow\n oflow_pnum = args.oflow_pnum\n mul_2stream = args.mul_2stream\n system_name = args.system_name\n if download:\n from checkpoints.download import download_sys\n Dir = CHEKPOINT + os.sep + system_name\n print(\n f'downloading weights and lables for {system_name} system to {Dir}.'\n )\n download_sys(system_name, Dir)\n models_dir, labels_dir = get_sys_info(system_name)\n print(f'In {args.system_name} folder:')\n for k, v in models_dir.items():\n if v is not None:\n print(\n f\"{' ' * 4}{k.upper()} WEIGHTS found : {v.rsplit(os.sep, 1)[-1]}\"\n )\n print(f\"{' ' * 4}labels : {labels_dir.rsplit(os.sep, 1)[-1]}\")\n if use_rgb and use_oflow:\n raise ValueError(\n \"\"\"ERROR : both rgb and oflow flags are on.\n\t\t\t\t\t\t trying to use both? set both flag to 'False'\"\"\"\n )\n if not pred_type == 'word' and not pred_type == 'sentence':\n raise ValueError(\"ERROR : pred_type should be 'word' or 'sentence'\")\n con = mul_oflow and not oflow_pnum > 0\n if con:\n raise ValueError('ERROR : check mul_oflow and oflow_pnum flags.')\n if not on_cpu and mul_2stream:\n raise ValueError(\n \"ERROR : you can't use multiprocessing on streams while the system is running on gpu.\"\n )\n if (use_rgb or use_oflow) and mul_2stream:\n raise ValueError(\n \"ERROR : you can't do multiprocessing while using just one stream!.\"\n )\n print_sys_info(args)\n os.makedirs('./tmp', exist_ok=True)\n if on_cpu:\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n from collections import defaultdict\n models = defaultdict(lambda : None)\n from utils.util import load_models, csv_to_dict\n from multiprocessing import Manager\n from multiprocessing import Process\n labels = csv_to_dict(labels_dir, LABELS_SWORD_COL)\n if not mul_2stream:\n print(f'loading labels from {labels_dir}.')\n labels = csv_to_dict(labels_dir, LABELS_SWORD_COL)\n print(f'{len(labels)} word found in {labels_dir}')\n uploading_time = time.time()\n print('Initializing models')\n models = load_models(models_dir, on_cpu, use_rgb, use_oflow,\n use_lstm, False)\n print(f'Uploading took {round(time.time() - uploading_time, 2)} sec')\n else:\n models['oflow'] = 1\n from utils.parallel_streams import nn_work\n _2stream.append(Process(target=nn_work, args=('oflow', models_dir,\n labels_dir, pred_type, nTop, mul_oflow, oflow_pnum)))\n _2stream.append(Process(target=nn_work, args=('rgb', models_dir,\n labels_dir, pred_type, nTop, mul_oflow, oflow_pnum)))\n if use_lstm:\n _2stream.append(Process(target=nn_work, args=('oflow',\n models_dir, labels_dir, pred_type, nTop, mul_oflow,\n oflow_pnum)))\n for p in _2stream:\n p.start()\n print(f'{len(_2stream)} process has been initialized.')\n if run_method == 'wamp':\n print('running wamp server.')\n from run.wamp import run_server\n if not os.path.exists(wamp_folder):\n raise ValueError(\n f\"ERROR : can't find wamp service in {wamp_folder} directory\")\n run_server(php_webservice, wamp_folder, models, labels, pred_type,\n nTop, mul_oflow, oflow_pnum, mul_2stream)\n elif run_method == 'webcam':\n print(\"testing system on webcam, to close webcam press 'q'.\")\n from run.webcam import test\n test(models, labels, pred_type, nTop, mul_oflow, oflow_pnum,\n mul_2stream)\n elif run_method == 'REST_API':\n print('Initiate REST API server ...')\n from run.REST_API import 
server\n server.run(models, labels, pred_type, nTop, mul_oflow, oflow_pnum,\n mul_2stream, host='0.0.0.0')\n",
"<import token>\nsys.path.insert(0, './utils')\n<import token>\nCHEKPOINT = './checkpoints'\nWEIGHTS = 'weights'\nLABELS = 'classes'\nphp_webservice = 'http://localhost/combine/webservices.php'\nwamp_folder = 'C:/wamp64/www/combine/'\n\n\ndef get_sys_info(sys_name):\n rgb_dir = None\n oflow_dir = None\n lstm_dir = None\n labels = None\n systems = glob.glob(os.path.join(CHEKPOINT, '*'))\n systems = list(map(lambda s: s.rsplit(f'{os.sep}', 1)[-1], systems))\n if not sys_name in systems or len(systems) == 0:\n raise ValueError(\n f'ERROR : could not find {sys_name} in {CHEKPOINT} directory.')\n sys_path = os.path.join(CHEKPOINT, sys_name)\n sys_weights = glob.glob(os.path.join(sys_path, WEIGHTS, '*.h5'))\n if len(sys_weights) == 0:\n raise ValueError(\n f'ERROR : no weights has been found in {WEIGHTS} folder.')\n h5_files = ['rgb', 'oflow', 'lstm', 'cpu']\n h5_dirs = {}\n for h5_file in h5_files:\n h5_dir = [weights for weights in sys_weights if h5_file in weights.\n lower()]\n if len(h5_dir) > 1:\n raise ValueError(\n f'ERROR : In {h5_dir[0].rsplit(os.sep, 1)[0]} directory more than one {h5_file} file found.'\n )\n h5_dirs[h5_file] = h5_dir[0] if len(h5_dir) > 0 else None\n sys_labels = glob.glob(os.path.join(sys_path, LABELS, '*.csv'))\n if len(sys_labels) != 1:\n raise ValueError(f'ERROR : something wrong with {LABELS} folder.')\n return h5_dirs, sys_labels[0]\n\n\ndef print_sys_info(args):\n print('running the system with:')\n for arg in vars(args):\n print(' ' * 3, f'{arg} = {getattr(args, arg)}')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-run', '--run', dest='run_method', type=str,\n default='webcam', help='choose a way to test the sign language system.'\n )\n parser.add_argument('-sys', '--system', dest='system_name', type=str,\n default='turkish_10_word', help=\n 'choose which sign language system to run.')\n parser.add_argument('-use_lstm', '--use_lstm', dest='use_lstm', type=\n bool, default=False, help='add lstm on top of stream network.')\n parser.add_argument('-rgb', '--rgb_only', dest='use_rgb', type=bool,\n default=True, help='just use rgb stream.')\n parser.add_argument('-oflow', '--oflow_only', dest='use_oflow', type=\n bool, default=False, help='just use optical flow stream.')\n parser.add_argument('-on_cpu', '--use_cpu', dest='on_cpu', type=bool,\n default=True, help='run the system on cpu.')\n parser.add_argument('-pred_type', '--prediction_type', dest='pred_type',\n type=str, default='word', help=\n 'define how the system output will be, either word or sentence.')\n parser.add_argument('-nTop', '--top_predictions', dest='nTop', type=int,\n default=3, help='how many result(output) should the system give.')\n parser.add_argument('-download', '--download', dest='download', type=\n bool, default=False, help=\n 'download weights and classes to checkpoints directory.')\n parser.add_argument('-mul_oflow', '--multiprocessing_opticalflow', dest\n ='mul_oflow', type=bool, default=False, help=\n 'faster optical flow calculation with multiprocessing.')\n parser.add_argument('-oflow_pnum', '--oflow_process_num', dest=\n 'oflow_pnum', type=int, default=4, help=\n 'number of processes to calculate optical flow.')\n parser.add_argument('-mul_2stream', '--multiprocessing_two_stream',\n dest='mul_2stream', type=bool, default=False, help=\n 'run two stream on different processes.')\n args = parser.parse_args()\n run_method = args.run_method\n use_lstm = args.use_lstm\n use_rgb = args.use_rgb\n use_oflow = args.use_oflow\n on_cpu = 
args.on_cpu\n pred_type = args.pred_type\n nTop = args.nTop\n download = args.download\n mul_oflow = args.mul_oflow\n oflow_pnum = args.oflow_pnum\n mul_2stream = args.mul_2stream\n system_name = args.system_name\n if download:\n from checkpoints.download import download_sys\n Dir = CHEKPOINT + os.sep + system_name\n print(\n f'downloading weights and lables for {system_name} system to {Dir}.'\n )\n download_sys(system_name, Dir)\n models_dir, labels_dir = get_sys_info(system_name)\n print(f'In {args.system_name} folder:')\n for k, v in models_dir.items():\n if v is not None:\n print(\n f\"{' ' * 4}{k.upper()} WEIGHTS found : {v.rsplit(os.sep, 1)[-1]}\"\n )\n print(f\"{' ' * 4}labels : {labels_dir.rsplit(os.sep, 1)[-1]}\")\n if use_rgb and use_oflow:\n raise ValueError(\n \"\"\"ERROR : both rgb and oflow flags are on.\n\t\t\t\t\t\t trying to use both? set both flag to 'False'\"\"\"\n )\n if not pred_type == 'word' and not pred_type == 'sentence':\n raise ValueError(\"ERROR : pred_type should be 'word' or 'sentence'\")\n con = mul_oflow and not oflow_pnum > 0\n if con:\n raise ValueError('ERROR : check mul_oflow and oflow_pnum flags.')\n if not on_cpu and mul_2stream:\n raise ValueError(\n \"ERROR : you can't use multiprocessing on streams while the system is running on gpu.\"\n )\n if (use_rgb or use_oflow) and mul_2stream:\n raise ValueError(\n \"ERROR : you can't do multiprocessing while using just one stream!.\"\n )\n print_sys_info(args)\n os.makedirs('./tmp', exist_ok=True)\n if on_cpu:\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n from collections import defaultdict\n models = defaultdict(lambda : None)\n from utils.util import load_models, csv_to_dict\n from multiprocessing import Manager\n from multiprocessing import Process\n labels = csv_to_dict(labels_dir, LABELS_SWORD_COL)\n if not mul_2stream:\n print(f'loading labels from {labels_dir}.')\n labels = csv_to_dict(labels_dir, LABELS_SWORD_COL)\n print(f'{len(labels)} word found in {labels_dir}')\n uploading_time = time.time()\n print('Initializing models')\n models = load_models(models_dir, on_cpu, use_rgb, use_oflow,\n use_lstm, False)\n print(f'Uploading took {round(time.time() - uploading_time, 2)} sec')\n else:\n models['oflow'] = 1\n from utils.parallel_streams import nn_work\n _2stream.append(Process(target=nn_work, args=('oflow', models_dir,\n labels_dir, pred_type, nTop, mul_oflow, oflow_pnum)))\n _2stream.append(Process(target=nn_work, args=('rgb', models_dir,\n labels_dir, pred_type, nTop, mul_oflow, oflow_pnum)))\n if use_lstm:\n _2stream.append(Process(target=nn_work, args=('oflow',\n models_dir, labels_dir, pred_type, nTop, mul_oflow,\n oflow_pnum)))\n for p in _2stream:\n p.start()\n print(f'{len(_2stream)} process has been initialized.')\n if run_method == 'wamp':\n print('running wamp server.')\n from run.wamp import run_server\n if not os.path.exists(wamp_folder):\n raise ValueError(\n f\"ERROR : can't find wamp service in {wamp_folder} directory\")\n run_server(php_webservice, wamp_folder, models, labels, pred_type,\n nTop, mul_oflow, oflow_pnum, mul_2stream)\n elif run_method == 'webcam':\n print(\"testing system on webcam, to close webcam press 'q'.\")\n from run.webcam import test\n test(models, labels, pred_type, nTop, mul_oflow, oflow_pnum,\n mul_2stream)\n elif run_method == 'REST_API':\n print('Initiate REST API server ...')\n from run.REST_API import server\n server.run(models, labels, pred_type, nTop, mul_oflow, oflow_pnum,\n mul_2stream, 
host='0.0.0.0')\n",
"<import token>\nsys.path.insert(0, './utils')\n<import token>\n<assignment token>\n\n\ndef get_sys_info(sys_name):\n rgb_dir = None\n oflow_dir = None\n lstm_dir = None\n labels = None\n systems = glob.glob(os.path.join(CHEKPOINT, '*'))\n systems = list(map(lambda s: s.rsplit(f'{os.sep}', 1)[-1], systems))\n if not sys_name in systems or len(systems) == 0:\n raise ValueError(\n f'ERROR : could not find {sys_name} in {CHEKPOINT} directory.')\n sys_path = os.path.join(CHEKPOINT, sys_name)\n sys_weights = glob.glob(os.path.join(sys_path, WEIGHTS, '*.h5'))\n if len(sys_weights) == 0:\n raise ValueError(\n f'ERROR : no weights has been found in {WEIGHTS} folder.')\n h5_files = ['rgb', 'oflow', 'lstm', 'cpu']\n h5_dirs = {}\n for h5_file in h5_files:\n h5_dir = [weights for weights in sys_weights if h5_file in weights.\n lower()]\n if len(h5_dir) > 1:\n raise ValueError(\n f'ERROR : In {h5_dir[0].rsplit(os.sep, 1)[0]} directory more than one {h5_file} file found.'\n )\n h5_dirs[h5_file] = h5_dir[0] if len(h5_dir) > 0 else None\n sys_labels = glob.glob(os.path.join(sys_path, LABELS, '*.csv'))\n if len(sys_labels) != 1:\n raise ValueError(f'ERROR : something wrong with {LABELS} folder.')\n return h5_dirs, sys_labels[0]\n\n\ndef print_sys_info(args):\n print('running the system with:')\n for arg in vars(args):\n print(' ' * 3, f'{arg} = {getattr(args, arg)}')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-run', '--run', dest='run_method', type=str,\n default='webcam', help='choose a way to test the sign language system.'\n )\n parser.add_argument('-sys', '--system', dest='system_name', type=str,\n default='turkish_10_word', help=\n 'choose which sign language system to run.')\n parser.add_argument('-use_lstm', '--use_lstm', dest='use_lstm', type=\n bool, default=False, help='add lstm on top of stream network.')\n parser.add_argument('-rgb', '--rgb_only', dest='use_rgb', type=bool,\n default=True, help='just use rgb stream.')\n parser.add_argument('-oflow', '--oflow_only', dest='use_oflow', type=\n bool, default=False, help='just use optical flow stream.')\n parser.add_argument('-on_cpu', '--use_cpu', dest='on_cpu', type=bool,\n default=True, help='run the system on cpu.')\n parser.add_argument('-pred_type', '--prediction_type', dest='pred_type',\n type=str, default='word', help=\n 'define how the system output will be, either word or sentence.')\n parser.add_argument('-nTop', '--top_predictions', dest='nTop', type=int,\n default=3, help='how many result(output) should the system give.')\n parser.add_argument('-download', '--download', dest='download', type=\n bool, default=False, help=\n 'download weights and classes to checkpoints directory.')\n parser.add_argument('-mul_oflow', '--multiprocessing_opticalflow', dest\n ='mul_oflow', type=bool, default=False, help=\n 'faster optical flow calculation with multiprocessing.')\n parser.add_argument('-oflow_pnum', '--oflow_process_num', dest=\n 'oflow_pnum', type=int, default=4, help=\n 'number of processes to calculate optical flow.')\n parser.add_argument('-mul_2stream', '--multiprocessing_two_stream',\n dest='mul_2stream', type=bool, default=False, help=\n 'run two stream on different processes.')\n args = parser.parse_args()\n run_method = args.run_method\n use_lstm = args.use_lstm\n use_rgb = args.use_rgb\n use_oflow = args.use_oflow\n on_cpu = args.on_cpu\n pred_type = args.pred_type\n nTop = args.nTop\n download = args.download\n mul_oflow = args.mul_oflow\n oflow_pnum = args.oflow_pnum\n mul_2stream 
= args.mul_2stream\n system_name = args.system_name\n if download:\n from checkpoints.download import download_sys\n Dir = CHEKPOINT + os.sep + system_name\n print(\n f'downloading weights and lables for {system_name} system to {Dir}.'\n )\n download_sys(system_name, Dir)\n models_dir, labels_dir = get_sys_info(system_name)\n print(f'In {args.system_name} folder:')\n for k, v in models_dir.items():\n if v is not None:\n print(\n f\"{' ' * 4}{k.upper()} WEIGHTS found : {v.rsplit(os.sep, 1)[-1]}\"\n )\n print(f\"{' ' * 4}labels : {labels_dir.rsplit(os.sep, 1)[-1]}\")\n if use_rgb and use_oflow:\n raise ValueError(\n \"\"\"ERROR : both rgb and oflow flags are on.\n\t\t\t\t\t\t trying to use both? set both flag to 'False'\"\"\"\n )\n if not pred_type == 'word' and not pred_type == 'sentence':\n raise ValueError(\"ERROR : pred_type should be 'word' or 'sentence'\")\n con = mul_oflow and not oflow_pnum > 0\n if con:\n raise ValueError('ERROR : check mul_oflow and oflow_pnum flags.')\n if not on_cpu and mul_2stream:\n raise ValueError(\n \"ERROR : you can't use multiprocessing on streams while the system is running on gpu.\"\n )\n if (use_rgb or use_oflow) and mul_2stream:\n raise ValueError(\n \"ERROR : you can't do multiprocessing while using just one stream!.\"\n )\n print_sys_info(args)\n os.makedirs('./tmp', exist_ok=True)\n if on_cpu:\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n from collections import defaultdict\n models = defaultdict(lambda : None)\n from utils.util import load_models, csv_to_dict\n from multiprocessing import Manager\n from multiprocessing import Process\n labels = csv_to_dict(labels_dir, LABELS_SWORD_COL)\n if not mul_2stream:\n print(f'loading labels from {labels_dir}.')\n labels = csv_to_dict(labels_dir, LABELS_SWORD_COL)\n print(f'{len(labels)} word found in {labels_dir}')\n uploading_time = time.time()\n print('Initializing models')\n models = load_models(models_dir, on_cpu, use_rgb, use_oflow,\n use_lstm, False)\n print(f'Uploading took {round(time.time() - uploading_time, 2)} sec')\n else:\n models['oflow'] = 1\n from utils.parallel_streams import nn_work\n _2stream.append(Process(target=nn_work, args=('oflow', models_dir,\n labels_dir, pred_type, nTop, mul_oflow, oflow_pnum)))\n _2stream.append(Process(target=nn_work, args=('rgb', models_dir,\n labels_dir, pred_type, nTop, mul_oflow, oflow_pnum)))\n if use_lstm:\n _2stream.append(Process(target=nn_work, args=('oflow',\n models_dir, labels_dir, pred_type, nTop, mul_oflow,\n oflow_pnum)))\n for p in _2stream:\n p.start()\n print(f'{len(_2stream)} process has been initialized.')\n if run_method == 'wamp':\n print('running wamp server.')\n from run.wamp import run_server\n if not os.path.exists(wamp_folder):\n raise ValueError(\n f\"ERROR : can't find wamp service in {wamp_folder} directory\")\n run_server(php_webservice, wamp_folder, models, labels, pred_type,\n nTop, mul_oflow, oflow_pnum, mul_2stream)\n elif run_method == 'webcam':\n print(\"testing system on webcam, to close webcam press 'q'.\")\n from run.webcam import test\n test(models, labels, pred_type, nTop, mul_oflow, oflow_pnum,\n mul_2stream)\n elif run_method == 'REST_API':\n print('Initiate REST API server ...')\n from run.REST_API import server\n server.run(models, labels, pred_type, nTop, mul_oflow, oflow_pnum,\n mul_2stream, host='0.0.0.0')\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef get_sys_info(sys_name):\n rgb_dir = None\n oflow_dir = None\n lstm_dir = None\n labels = None\n systems = glob.glob(os.path.join(CHEKPOINT, '*'))\n systems = list(map(lambda s: s.rsplit(f'{os.sep}', 1)[-1], systems))\n if not sys_name in systems or len(systems) == 0:\n raise ValueError(\n f'ERROR : could not find {sys_name} in {CHEKPOINT} directory.')\n sys_path = os.path.join(CHEKPOINT, sys_name)\n sys_weights = glob.glob(os.path.join(sys_path, WEIGHTS, '*.h5'))\n if len(sys_weights) == 0:\n raise ValueError(\n f'ERROR : no weights has been found in {WEIGHTS} folder.')\n h5_files = ['rgb', 'oflow', 'lstm', 'cpu']\n h5_dirs = {}\n for h5_file in h5_files:\n h5_dir = [weights for weights in sys_weights if h5_file in weights.\n lower()]\n if len(h5_dir) > 1:\n raise ValueError(\n f'ERROR : In {h5_dir[0].rsplit(os.sep, 1)[0]} directory more than one {h5_file} file found.'\n )\n h5_dirs[h5_file] = h5_dir[0] if len(h5_dir) > 0 else None\n sys_labels = glob.glob(os.path.join(sys_path, LABELS, '*.csv'))\n if len(sys_labels) != 1:\n raise ValueError(f'ERROR : something wrong with {LABELS} folder.')\n return h5_dirs, sys_labels[0]\n\n\ndef print_sys_info(args):\n print('running the system with:')\n for arg in vars(args):\n print(' ' * 3, f'{arg} = {getattr(args, arg)}')\n\n\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef get_sys_info(sys_name):\n rgb_dir = None\n oflow_dir = None\n lstm_dir = None\n labels = None\n systems = glob.glob(os.path.join(CHEKPOINT, '*'))\n systems = list(map(lambda s: s.rsplit(f'{os.sep}', 1)[-1], systems))\n if not sys_name in systems or len(systems) == 0:\n raise ValueError(\n f'ERROR : could not find {sys_name} in {CHEKPOINT} directory.')\n sys_path = os.path.join(CHEKPOINT, sys_name)\n sys_weights = glob.glob(os.path.join(sys_path, WEIGHTS, '*.h5'))\n if len(sys_weights) == 0:\n raise ValueError(\n f'ERROR : no weights has been found in {WEIGHTS} folder.')\n h5_files = ['rgb', 'oflow', 'lstm', 'cpu']\n h5_dirs = {}\n for h5_file in h5_files:\n h5_dir = [weights for weights in sys_weights if h5_file in weights.\n lower()]\n if len(h5_dir) > 1:\n raise ValueError(\n f'ERROR : In {h5_dir[0].rsplit(os.sep, 1)[0]} directory more than one {h5_file} file found.'\n )\n h5_dirs[h5_file] = h5_dir[0] if len(h5_dir) > 0 else None\n sys_labels = glob.glob(os.path.join(sys_path, LABELS, '*.csv'))\n if len(sys_labels) != 1:\n raise ValueError(f'ERROR : something wrong with {LABELS} folder.')\n return h5_dirs, sys_labels[0]\n\n\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,531 |
a48a05321659aa6dabf7a8743007d01c1c3d74cc
|
import json
from django.http import HttpResponse
from django.views.decorators.http import require_http_methods
from django.shortcuts import render_to_response
from utils import validate_geojson, get_remote_json
from exc import GeoJSONValidationException, NonFetchableURLException
def home(request):
"""
GET /
Show the home page
"""
return render_to_response('index.html')
@require_http_methods(['GET', 'POST'])
def validate(request):
"""
POST /validate
Validate GeoJSON data in POST body
"""
testing = request.GET.get('testing')
if request.method == 'POST':
        stringy_json = request.raw_post_data  # pre-Django 1.4 API; later versions use request.body
else: # GET
try:
remote_url = request.GET['url']
stringy_json = get_remote_json(remote_url)
except KeyError: # The "url" URL parameter was missing
return _geojson_error('When validating via GET, a "url" URL parameter is required.', status=400)
except NonFetchableURLException:
return _geojson_error('The URL passed could not be fetched.')
try:
test_geojson = json.loads(stringy_json)
if not isinstance(test_geojson, dict):
return _geojson_error('Data was not a JSON object.', testing)
    except (TypeError, ValueError):  # payload was not a string or not parseable JSON
        return _geojson_error('Data was not JSON serializable.', testing)
if not 'type' in test_geojson:
return _geojson_error('The "type" member is required and was not found.', testing)
try:
validate_geojson(test_geojson)
except GeoJSONValidationException as e:
return _geojson_error(str(e), testing)
# Everything checked out. Return 'ok'.
resp = {
'status': 'ok',
}
return HttpResponse(json.dumps(resp), mimetype='application/json')
def _geojson_error(message, testing=False, status=200):
resp = {
'status': 'error',
'message': message,
}
return HttpResponse(json.dumps(resp), mimetype='application/json', status=status)
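

# Example requests (the '/validate' URL prefix is an assumption based on the
# docstring; match it to this app's urls.py):
#   curl -X POST http://localhost:8000/validate \
#        -d '{"type": "Point", "coordinates": [102.0, 0.5]}'
#   curl 'http://localhost:8000/validate?url=http://example.com/data.geojson&testing=1'
# Both return {"status": "ok"} on success or {"status": "error", "message": ...}.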
|
[
"import json\n\nfrom django.http import HttpResponse\nfrom django.views.decorators.http import require_http_methods\nfrom django.shortcuts import render_to_response\n\nfrom utils import validate_geojson, get_remote_json\nfrom exc import GeoJSONValidationException, NonFetchableURLException\n\n\ndef home(request):\n \"\"\"\n GET /\n\n Show the home page\n \"\"\"\n return render_to_response('index.html')\n\n\n@require_http_methods(['GET', 'POST'])\ndef validate(request):\n \"\"\"\n POST /validate\n\n Validate GeoJSON data in POST body\n \"\"\"\n\n testing = request.GET.get('testing')\n\n if request.method == 'POST':\n stringy_json = request.raw_post_data\n else: # GET\n try:\n remote_url = request.GET['url']\n stringy_json = get_remote_json(remote_url)\n except KeyError: # The \"url\" URL parameter was missing\n return _geojson_error('When validating via GET, a \"url\" URL parameter is required.', status=400)\n except NonFetchableURLException:\n return _geojson_error('The URL passed could not be fetched.')\n\n try:\n test_geojson = json.loads(stringy_json)\n if not isinstance(test_geojson, dict):\n return _geojson_error('Data was not a JSON object.', testing)\n except:\n return _geojson_error('Data was not JSON serializeable.', testing)\n\n if not 'type' in test_geojson:\n return _geojson_error('The \"type\" member is required and was not found.', testing)\n\n try:\n validate_geojson(test_geojson)\n except GeoJSONValidationException as e:\n return _geojson_error(str(e), testing)\n\n # Everything checked out. Return 'ok'.\n resp = {\n 'status': 'ok',\n }\n return HttpResponse(json.dumps(resp), mimetype='application/json')\n\n\ndef _geojson_error(message, testing=False, status=200):\n resp = {\n 'status': 'error',\n 'message': message,\n }\n return HttpResponse(json.dumps(resp), mimetype='application/json', status=status)\n",
"import json\nfrom django.http import HttpResponse\nfrom django.views.decorators.http import require_http_methods\nfrom django.shortcuts import render_to_response\nfrom utils import validate_geojson, get_remote_json\nfrom exc import GeoJSONValidationException, NonFetchableURLException\n\n\ndef home(request):\n \"\"\"\n GET /\n\n Show the home page\n \"\"\"\n return render_to_response('index.html')\n\n\n@require_http_methods(['GET', 'POST'])\ndef validate(request):\n \"\"\"\n POST /validate\n\n Validate GeoJSON data in POST body\n \"\"\"\n testing = request.GET.get('testing')\n if request.method == 'POST':\n stringy_json = request.raw_post_data\n else:\n try:\n remote_url = request.GET['url']\n stringy_json = get_remote_json(remote_url)\n except KeyError:\n return _geojson_error(\n 'When validating via GET, a \"url\" URL parameter is required.',\n status=400)\n except NonFetchableURLException:\n return _geojson_error('The URL passed could not be fetched.')\n try:\n test_geojson = json.loads(stringy_json)\n if not isinstance(test_geojson, dict):\n return _geojson_error('Data was not a JSON object.', testing)\n except:\n return _geojson_error('Data was not JSON serializeable.', testing)\n if not 'type' in test_geojson:\n return _geojson_error(\n 'The \"type\" member is required and was not found.', testing)\n try:\n validate_geojson(test_geojson)\n except GeoJSONValidationException as e:\n return _geojson_error(str(e), testing)\n resp = {'status': 'ok'}\n return HttpResponse(json.dumps(resp), mimetype='application/json')\n\n\ndef _geojson_error(message, testing=False, status=200):\n resp = {'status': 'error', 'message': message}\n return HttpResponse(json.dumps(resp), mimetype='application/json',\n status=status)\n",
"<import token>\n\n\ndef home(request):\n \"\"\"\n GET /\n\n Show the home page\n \"\"\"\n return render_to_response('index.html')\n\n\n@require_http_methods(['GET', 'POST'])\ndef validate(request):\n \"\"\"\n POST /validate\n\n Validate GeoJSON data in POST body\n \"\"\"\n testing = request.GET.get('testing')\n if request.method == 'POST':\n stringy_json = request.raw_post_data\n else:\n try:\n remote_url = request.GET['url']\n stringy_json = get_remote_json(remote_url)\n except KeyError:\n return _geojson_error(\n 'When validating via GET, a \"url\" URL parameter is required.',\n status=400)\n except NonFetchableURLException:\n return _geojson_error('The URL passed could not be fetched.')\n try:\n test_geojson = json.loads(stringy_json)\n if not isinstance(test_geojson, dict):\n return _geojson_error('Data was not a JSON object.', testing)\n except:\n return _geojson_error('Data was not JSON serializeable.', testing)\n if not 'type' in test_geojson:\n return _geojson_error(\n 'The \"type\" member is required and was not found.', testing)\n try:\n validate_geojson(test_geojson)\n except GeoJSONValidationException as e:\n return _geojson_error(str(e), testing)\n resp = {'status': 'ok'}\n return HttpResponse(json.dumps(resp), mimetype='application/json')\n\n\ndef _geojson_error(message, testing=False, status=200):\n resp = {'status': 'error', 'message': message}\n return HttpResponse(json.dumps(resp), mimetype='application/json',\n status=status)\n",
"<import token>\n<function token>\n\n\n@require_http_methods(['GET', 'POST'])\ndef validate(request):\n \"\"\"\n POST /validate\n\n Validate GeoJSON data in POST body\n \"\"\"\n testing = request.GET.get('testing')\n if request.method == 'POST':\n stringy_json = request.raw_post_data\n else:\n try:\n remote_url = request.GET['url']\n stringy_json = get_remote_json(remote_url)\n except KeyError:\n return _geojson_error(\n 'When validating via GET, a \"url\" URL parameter is required.',\n status=400)\n except NonFetchableURLException:\n return _geojson_error('The URL passed could not be fetched.')\n try:\n test_geojson = json.loads(stringy_json)\n if not isinstance(test_geojson, dict):\n return _geojson_error('Data was not a JSON object.', testing)\n except:\n return _geojson_error('Data was not JSON serializeable.', testing)\n if not 'type' in test_geojson:\n return _geojson_error(\n 'The \"type\" member is required and was not found.', testing)\n try:\n validate_geojson(test_geojson)\n except GeoJSONValidationException as e:\n return _geojson_error(str(e), testing)\n resp = {'status': 'ok'}\n return HttpResponse(json.dumps(resp), mimetype='application/json')\n\n\ndef _geojson_error(message, testing=False, status=200):\n resp = {'status': 'error', 'message': message}\n return HttpResponse(json.dumps(resp), mimetype='application/json',\n status=status)\n",
"<import token>\n<function token>\n\n\n@require_http_methods(['GET', 'POST'])\ndef validate(request):\n \"\"\"\n POST /validate\n\n Validate GeoJSON data in POST body\n \"\"\"\n testing = request.GET.get('testing')\n if request.method == 'POST':\n stringy_json = request.raw_post_data\n else:\n try:\n remote_url = request.GET['url']\n stringy_json = get_remote_json(remote_url)\n except KeyError:\n return _geojson_error(\n 'When validating via GET, a \"url\" URL parameter is required.',\n status=400)\n except NonFetchableURLException:\n return _geojson_error('The URL passed could not be fetched.')\n try:\n test_geojson = json.loads(stringy_json)\n if not isinstance(test_geojson, dict):\n return _geojson_error('Data was not a JSON object.', testing)\n except:\n return _geojson_error('Data was not JSON serializeable.', testing)\n if not 'type' in test_geojson:\n return _geojson_error(\n 'The \"type\" member is required and was not found.', testing)\n try:\n validate_geojson(test_geojson)\n except GeoJSONValidationException as e:\n return _geojson_error(str(e), testing)\n resp = {'status': 'ok'}\n return HttpResponse(json.dumps(resp), mimetype='application/json')\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,532 |
bf1bddc8dafc357fcdc4a4ca156d4a3b19a94e56
|
ITEM: TIMESTEP
1000
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
-2.7012907437591949e-01 4.7470129074369581e+01
-2.7012907437591949e-01 4.7470129074369581e+01
-2.7012907437591949e-01 4.7470129074369581e+01
ITEM: ATOMS id type xs ys zs
8 1 0.118514 0.0599866 0.0631161
35 1 0.0692648 0.12677 0.0619928
130 1 0.0703103 0.0649093 0.126251
165 1 0.138603 0.117881 0.127473
155 1 0.802452 -0.00225189 0.190937
279 1 0.682405 -0.00102136 0.320418
85 1 0.617155 0.249443 0.014004
134 1 0.190224 0.042665 0.122036
12 1 0.248376 0.0662609 0.0678188
39 1 0.189758 0.118437 0.0604761
43 1 0.30201 0.122494 0.054495
138 1 0.318919 0.0606008 0.121184
169 1 0.25401 0.123982 0.124703
618 1 0.314046 0.423935 0.498843
157 1 0.882592 0.0144295 0.135182
1167 1 0.446654 0.498932 0.178384
1439 1 0.930856 0.489936 0.429996
16 1 0.377532 0.0788858 0.0614955
47 1 0.443232 0.119355 0.0599885
142 1 0.437491 0.0522965 0.123421
173 1 0.381328 0.126711 0.132362
177 1 0.498436 0.123563 0.127073
594 1 0.571856 0.317841 0.497458
1183 1 0.939923 0.497092 0.177559
1415 1 0.183325 0.497855 0.452079
50 1 0.559938 0.181833 -0.000667064
20 1 0.504471 0.0635768 0.0603335
24 1 0.626716 0.0534061 0.0583679
51 1 0.567867 0.120083 0.0614
146 1 0.550245 0.0584699 0.124254
181 1 0.634136 0.115482 0.116651
407 1 0.686788 0.00162551 0.449686
49 1 0.497971 0.119963 0.00499224
150 1 0.698101 0.046347 0.110991
28 1 0.755827 0.0629804 0.060971
55 1 0.690196 0.125129 0.0563922
59 1 0.814925 0.123202 0.0486223
154 1 0.814239 0.0472368 0.1162
185 1 0.748493 0.115375 0.124793
389 1 0.131156 0.00768717 0.374367
122 1 0.813826 0.45016 -0.00226142
4 1 0.00571049 0.0642641 0.0516869
161 1 0.0145263 0.120345 0.125595
32 1 0.882638 0.0641087 0.0624529
63 1 0.944441 0.133281 0.0668484
158 1 0.942773 0.0621624 0.121663
189 1 0.884011 0.12647 0.120856
31 1 0.949647 0.00451724 0.0592671
40 1 0.126322 0.180487 0.0591702
67 1 0.076321 0.256897 0.0481963
162 1 0.0737709 0.18292 0.125443
197 1 0.124175 0.243848 0.132161
72 1 0.128136 0.314366 0.0799744
194 1 0.068254 0.291934 0.117875
193 1 0.00924748 0.233044 0.121644
36 1 0.0115579 0.187866 0.0596469
1437 1 0.863518 0.486486 0.384824
411 1 0.807086 0.0047618 0.438232
391 1 0.187557 0.0128456 0.441716
166 1 0.186668 0.177231 0.122783
198 1 0.178173 0.312545 0.144773
170 1 0.317746 0.169357 0.138677
44 1 0.246056 0.188666 0.0715916
71 1 0.189382 0.250992 0.0744894
201 1 0.238849 0.241488 0.132444
75 1 0.301765 0.250298 0.0782923
76 1 0.239888 0.308852 0.0584296
202 1 0.293209 0.307263 0.137376
606 1 0.942553 0.303178 0.49923
401 1 0.496693 0.000873129 0.37682
102 1 0.182723 0.436664 -0.000478082
1427 1 0.570435 0.500057 0.429309
48 1 0.393356 0.190678 0.0599849
174 1 0.437689 0.18471 0.139591
79 1 0.440983 0.255103 0.0581657
205 1 0.365623 0.249313 0.130058
80 1 0.379197 0.303608 0.0704467
206 1 0.432488 0.323907 0.127045
621 1 0.373617 0.367529 0.49599
14 1 0.43862 0.0587564 0.00627447
52 1 0.484423 0.188543 0.0665317
84 1 0.500018 0.31934 0.07723
209 1 0.488284 0.254011 0.128159
178 1 0.546072 0.185375 0.136579
56 1 0.613229 0.185539 0.0768676
83 1 0.545828 0.242915 0.0664197
88 1 0.611037 0.319933 0.0619186
210 1 0.555776 0.302342 0.129695
213 1 0.622974 0.244927 0.125081
186 1 0.802287 0.173743 0.124388
182 1 0.69117 0.179418 0.113282
60 1 0.75244 0.195119 0.0569863
87 1 0.691608 0.250977 0.0559722
217 1 0.754972 0.236141 0.125877
218 1 0.823524 0.299716 0.118129
92 1 0.760961 0.313528 0.0687033
91 1 0.82084 0.237089 0.0685305
214 1 0.686067 0.313695 0.116799
1039 1 0.445446 0.49796 0.0697614
1173 1 0.627414 0.495995 0.116266
283 1 0.820211 0.00151803 0.318055
74 1 0.323208 0.294976 0.00990392
68 1 0.00301754 0.313786 0.0554314
190 1 0.950554 0.184051 0.13353
64 1 0.874802 0.186404 0.0518447
95 1 0.920281 0.251951 0.0571724
221 1 0.875537 0.220154 0.135128
222 1 0.95689 0.308093 0.121029
96 1 0.873156 0.312464 0.0536935
1177 1 0.753974 0.500885 0.119529
546 1 0.0630301 0.184789 0.495837
99 1 0.0687252 0.384531 0.0673004
104 1 0.136695 0.440047 0.0634071
226 1 0.0742081 0.450335 0.121242
229 1 0.126825 0.375727 0.135456
19 1 0.548714 0.00136653 0.058336
265 1 0.250521 0.00638232 0.237985
399 1 0.442478 0.0051812 0.437462
65 1 0.013382 0.251309 0.00485621
143 1 0.43864 0.00191848 0.186309
103 1 0.195233 0.369267 0.0611842
108 1 0.251689 0.420277 0.0546987
230 1 0.189032 0.429653 0.119762
233 1 0.246148 0.374706 0.125159
234 1 0.304974 0.434872 0.136514
107 1 0.304012 0.347258 0.066143
1305 1 0.755913 0.493287 0.248738
1283 1 0.0579747 0.488959 0.312582
111 1 0.41954 0.370177 0.0672681
112 1 0.354168 0.432917 0.0728413
237 1 0.366392 0.370227 0.129009
238 1 0.430399 0.430965 0.133122
578 1 0.0795431 0.302974 0.492861
116 1 0.501339 0.432268 0.0580372
241 1 0.482526 0.378881 0.134142
115 1 0.566084 0.382841 0.047451
120 1 0.640461 0.440452 0.0528615
242 1 0.56158 0.431715 0.110301
245 1 0.622856 0.376628 0.132713
38 1 0.190477 0.190911 0.00114873
566 1 0.677669 0.188694 0.496938
246 1 0.685867 0.443722 0.117934
119 1 0.690218 0.369414 0.0595599
123 1 0.818524 0.387424 0.0536907
124 1 0.748616 0.445361 0.0507502
249 1 0.757569 0.377445 0.116976
250 1 0.811663 0.438016 0.112322
510 1 0.915903 0.434147 0.36995
126 1 0.972249 0.449212 0.0017488
511 1 0.939026 0.368121 0.434918
1311 1 0.940591 0.48799 0.302905
23 1 0.691187 0.00148969 0.0509751
100 1 0.997448 0.449269 0.0774573
225 1 0.994329 0.376387 0.106942
127 1 0.929108 0.388978 0.050183
128 1 0.878847 0.45188 0.0501073
253 1 0.881601 0.366095 0.123638
254 1 0.935466 0.436971 0.127853
9 1 0.265799 0.000498899 0.0113705
163 1 0.0707224 0.12067 0.185834
136 1 0.131257 0.0632023 0.193464
258 1 0.065459 0.0641483 0.244256
264 1 0.130556 0.0663226 0.310906
291 1 0.066086 0.128529 0.320081
293 1 0.126742 0.121291 0.250636
260 1 0.00938821 0.064928 0.310796
289 1 0.001251 0.135354 0.26037
1421 1 0.370749 0.492516 0.371624
171 1 0.318207 0.106202 0.191346
262 1 0.200675 0.0637096 0.25129
167 1 0.192966 0.130268 0.188828
140 1 0.249489 0.0623364 0.178885
266 1 0.327336 0.0583692 0.25424
297 1 0.255416 0.124076 0.251139
268 1 0.267696 0.049357 0.312791
299 1 0.313521 0.111967 0.323083
144 1 0.372563 0.0565053 0.186376
175 1 0.435031 0.113593 0.19408
270 1 0.428226 0.0647762 0.253003
301 1 0.370036 0.119884 0.25734
303 1 0.437192 0.12239 0.317112
272 1 0.369799 0.0496622 0.318752
148 1 0.503398 0.0615788 0.194244
1179 1 0.8207 0.492422 0.181946
276 1 0.494555 0.0625772 0.305486
305 1 0.500672 0.120962 0.246257
179 1 0.57125 0.120613 0.172074
152 1 0.630617 0.0560961 0.186879
274 1 0.566035 0.0738323 0.245885
309 1 0.633561 0.118334 0.250946
280 1 0.618156 0.0540839 0.305725
307 1 0.567833 0.121863 0.308126
387 1 0.0697478 0.00643595 0.437573
156 1 0.752557 0.0516617 0.175757
183 1 0.692954 0.114931 0.177945
278 1 0.691187 0.0604386 0.239381
282 1 0.812934 0.0523742 0.259847
284 1 0.750159 0.0589086 0.308726
313 1 0.741775 0.12767 0.250088
187 1 0.819947 0.110361 0.178322
315 1 0.797582 0.118836 0.31243
311 1 0.688368 0.108002 0.310323
405 1 0.618834 0.00349033 0.374101
160 1 0.882446 0.0668656 0.199144
1035 1 0.302622 0.49573 0.0659356
132 1 0.0103734 0.0683327 0.190101
191 1 0.943116 0.116028 0.182531
286 1 0.94532 0.0727851 0.251805
288 1 0.876924 0.0607833 0.314023
317 1 0.86857 0.118622 0.264616
168 1 0.137856 0.183316 0.187528
195 1 0.0743311 0.235452 0.196622
200 1 0.109581 0.310019 0.193939
290 1 0.0721373 0.18299 0.253189
325 1 0.123716 0.247241 0.262148
322 1 0.062828 0.311933 0.263019
323 1 0.068578 0.249774 0.324761
296 1 0.119282 0.189087 0.333289
328 1 0.124947 0.317752 0.318061
196 1 0.0194401 0.292051 0.190091
164 1 0.0125664 0.183964 0.186864
172 1 0.251399 0.186957 0.190209
326 1 0.186542 0.302384 0.247595
327 1 0.191837 0.251715 0.321374
199 1 0.188637 0.240024 0.194065
294 1 0.184304 0.190726 0.254172
300 1 0.247523 0.180066 0.319473
329 1 0.243051 0.249727 0.262853
330 1 0.309259 0.310577 0.259672
331 1 0.30739 0.247344 0.307115
332 1 0.24043 0.322178 0.307556
204 1 0.25269 0.31284 0.201194
298 1 0.303916 0.183638 0.252862
203 1 0.309328 0.247044 0.182746
302 1 0.444861 0.178708 0.256232
176 1 0.370881 0.186633 0.199857
207 1 0.427222 0.247481 0.186114
304 1 0.367232 0.186046 0.30512
333 1 0.362061 0.256042 0.246677
334 1 0.44034 0.316548 0.252522
335 1 0.436936 0.254129 0.297573
336 1 0.375638 0.306719 0.308759
208 1 0.374193 0.312995 0.180471
308 1 0.507375 0.181129 0.311045
212 1 0.493034 0.310921 0.189424
180 1 0.500946 0.180957 0.19797
337 1 0.510236 0.251395 0.243778
184 1 0.62257 0.178732 0.191661
211 1 0.574451 0.248376 0.197664
216 1 0.624436 0.308785 0.182108
306 1 0.564385 0.174235 0.242509
312 1 0.638816 0.172643 0.316978
338 1 0.565728 0.315652 0.252899
339 1 0.56265 0.2578 0.308184
341 1 0.622307 0.231887 0.257868
344 1 0.63008 0.308303 0.312779
310 1 0.684542 0.183471 0.237051
340 1 0.515481 0.319836 0.303985
188 1 0.743699 0.173984 0.185743
342 1 0.690746 0.313526 0.237522
343 1 0.687366 0.249605 0.315489
314 1 0.803843 0.175876 0.251949
215 1 0.683977 0.243848 0.179279
345 1 0.740194 0.250409 0.236417
346 1 0.81016 0.322556 0.240466
220 1 0.752354 0.302462 0.179514
219 1 0.805974 0.243593 0.192992
348 1 0.753464 0.302853 0.298569
316 1 0.733869 0.17149 0.319092
347 1 0.806721 0.243829 0.299436
324 1 0.998384 0.309484 0.314138
321 1 0.00472738 0.249669 0.263241
292 1 -3.48524e-05 0.18787 0.317446
192 1 0.86915 0.189475 0.211274
223 1 0.950325 0.240918 0.198691
224 1 0.869733 0.294903 0.184127
318 1 0.939829 0.185931 0.252912
320 1 0.870373 0.182399 0.309988
349 1 0.861931 0.262995 0.258256
350 1 0.937321 0.291816 0.255468
352 1 0.873818 0.316685 0.309806
351 1 0.935468 0.241755 0.313017
1553 1 0.511316 0.49911 0.496922
101 1 0.123586 0.382663 0.0058635
227 1 0.0591683 0.372661 0.185434
232 1 0.127525 0.439844 0.182481
354 1 0.0590146 0.430447 0.251999
355 1 0.0507042 0.380194 0.313365
357 1 0.115009 0.375403 0.239623
360 1 0.124379 0.436125 0.306641
3 1 0.0569214 0.00126409 0.0601337
1161 1 0.235651 0.496119 0.118679
45 1 0.373823 0.134806 0.00641269
231 1 0.183001 0.37841 0.189905
236 1 0.241633 0.434285 0.189762
358 1 0.181967 0.439963 0.252226
361 1 0.267945 0.380422 0.248152
364 1 0.245792 0.435535 0.306225
363 1 0.323423 0.372394 0.317974
362 1 0.307359 0.447638 0.241571
235 1 0.321659 0.37203 0.193938
359 1 0.181426 0.381278 0.307363
514 1 0.0722279 0.065593 0.494081
622 1 0.439223 0.426951 0.481953
240 1 0.367489 0.429214 0.18921
239 1 0.424896 0.369324 0.187921
365 1 0.379354 0.368766 0.257408
366 1 0.436154 0.450428 0.248841
367 1 0.437607 0.368689 0.315361
368 1 0.379672 0.431514 0.318504
244 1 0.509221 0.435704 0.183107
372 1 0.499926 0.446236 0.304935
369 1 0.499547 0.384687 0.252067
243 1 0.552897 0.358223 0.186542
248 1 0.629755 0.432905 0.188826
370 1 0.560367 0.435859 0.245465
371 1 0.568381 0.381061 0.310445
373 1 0.620329 0.375681 0.24783
376 1 0.635822 0.463798 0.302935
509 1 0.86504 0.356649 0.383999
374 1 0.686696 0.442238 0.24301
375 1 0.691934 0.375482 0.312211
247 1 0.691398 0.379844 0.176753
252 1 0.748887 0.443305 0.176101
377 1 0.738038 0.378944 0.236258
380 1 0.747421 0.454819 0.328098
378 1 0.812566 0.436518 0.244632
379 1 0.786022 0.379454 0.298561
251 1 0.815193 0.38675 0.182723
484 1 0.00266108 0.42695 0.427328
353 1 0.998238 0.360195 0.241094
356 1 0.993109 0.430081 0.329548
228 1 0.0107157 0.435179 0.175066
255 1 0.941546 0.374804 0.175594
256 1 0.885752 0.424401 0.189602
381 1 0.883979 0.356512 0.233483
382 1 0.957127 0.432555 0.243227
383 1 0.944983 0.368109 0.311968
384 1 0.866107 0.406348 0.313546
386 1 0.0680251 0.068533 0.379664
392 1 0.13419 0.0689766 0.43098
419 1 0.0655568 0.127146 0.442593
421 1 0.127483 0.126004 0.379446
388 1 -0.000852562 0.0605331 0.438945
417 1 0.00686336 0.122699 0.38276
525 1 0.366203 0.00590256 0.497599
554 1 0.339524 0.194979 0.485974
598 1 0.701731 0.31107 0.49765
295 1 0.183162 0.132046 0.31645
82 1 0.557418 0.311008 0.0067994
423 1 0.203405 0.122946 0.444606
390 1 0.201726 0.0619705 0.371967
394 1 0.317135 0.0455882 0.374177
396 1 0.260996 0.0541055 0.430731
425 1 0.241628 0.130419 0.376684
427 1 0.310506 0.118174 0.431324
10 1 0.32116 0.0607328 0.000550233
398 1 0.439534 0.0525346 0.368472
400 1 0.377848 0.0553987 0.422199
429 1 0.375383 0.113342 0.367778
431 1 0.448512 0.101567 0.443121
630 1 0.691118 0.42488 0.498634
433 1 0.498581 0.11914 0.367541
404 1 0.519597 0.0640253 0.436275
402 1 0.559267 0.0527202 0.358355
408 1 0.6227 0.0617705 0.442596
435 1 0.572995 0.123536 0.430971
437 1 0.622568 0.112706 0.365199
481 1 0.997341 0.358809 0.382754
1165 1 0.367818 0.497661 0.140442
1309 1 0.891404 0.490047 0.243884
562 1 0.561654 0.181484 0.48601
1423 1 0.430604 0.490676 0.430924
406 1 0.682333 0.0544716 0.388791
412 1 0.754501 0.0730608 0.441638
439 1 0.680973 0.124113 0.433829
441 1 0.740139 0.110888 0.372033
443 1 0.807228 0.13019 0.433752
410 1 0.797682 0.0560852 0.372701
257 1 0.00295484 0.00763216 0.249672
89 1 0.757532 0.263667 0.0066509
319 1 0.946268 0.118886 0.314702
589 1 0.396991 0.249207 0.480039
414 1 0.943247 0.0660529 0.372274
416 1 0.87479 0.0469134 0.431722
445 1 0.872408 0.11783 0.360315
447 1 0.945373 0.129415 0.433991
601 1 0.749512 0.235626 0.498404
418 1 0.0553018 0.193968 0.386331
424 1 0.118797 0.183696 0.431976
450 1 0.0656293 0.315449 0.381068
451 1 0.0556372 0.259133 0.433577
453 1 0.127245 0.247558 0.384759
456 1 0.132483 0.312607 0.434768
610 1 0.0496199 0.44117 0.489946
503 1 0.685967 0.374213 0.431625
1289 1 0.238966 0.501693 0.251155
263 1 0.192968 0.0056835 0.300005
460 1 0.244439 0.306528 0.434564
428 1 0.255059 0.183217 0.443301
422 1 0.183996 0.196677 0.375745
426 1 0.316628 0.180418 0.371229
454 1 0.192119 0.315028 0.371582
455 1 0.189518 0.242617 0.454299
457 1 0.250268 0.243958 0.371817
458 1 0.298552 0.312051 0.360802
459 1 0.308851 0.250712 0.428288
1285 1 0.116481 0.491998 0.255369
506 1 0.812904 0.430023 0.370899
1417 1 0.249112 0.485407 0.377114
430 1 0.441701 0.199704 0.360128
432 1 0.389032 0.182223 0.425645
461 1 0.370803 0.245054 0.37149
462 1 0.454141 0.309045 0.373849
463 1 0.454277 0.246273 0.432995
464 1 0.372844 0.310179 0.428198
468 1 0.518381 0.310551 0.445361
436 1 0.492208 0.174056 0.430224
465 1 0.513761 0.253854 0.364145
440 1 0.620933 0.186598 0.437367
467 1 0.566655 0.245832 0.428722
469 1 0.621799 0.252399 0.372789
434 1 0.558688 0.181588 0.374504
466 1 0.563376 0.319869 0.365647
472 1 0.620616 0.310887 0.427869
505 1 0.743021 0.379794 0.382571
538 1 0.813374 0.0813273 0.497929
474 1 0.812251 0.288634 0.36542
471 1 0.691481 0.25158 0.440328
444 1 0.748957 0.18914 0.44206
473 1 0.748427 0.239163 0.367052
442 1 0.816563 0.187735 0.384058
438 1 0.678334 0.196422 0.382618
476 1 0.761744 0.31053 0.421663
470 1 0.676501 0.311353 0.374078
475 1 0.81564 0.243754 0.44874
1307 1 0.824716 0.487753 0.303139
449 1 -7.31866e-05 0.257654 0.379034
452 1 0.000553588 0.310402 0.442312
420 1 0.00423474 0.193711 0.437647
446 1 0.945693 0.185782 0.37554
480 1 0.879294 0.303477 0.443183
448 1 0.879126 0.178648 0.446186
478 1 0.932198 0.302689 0.367797
479 1 0.934984 0.240049 0.439826
477 1 0.882491 0.236655 0.372174
508 1 0.751799 0.438068 0.442169
1419 1 0.311483 0.502242 0.43804
488 1 0.139401 0.429364 0.447633
482 1 0.0748751 0.440477 0.379114
483 1 0.0609223 0.376964 0.441506
485 1 0.128904 0.377723 0.372408
512 1 0.886452 0.421826 0.435743
507 1 0.813739 0.379195 0.446697
502 1 0.68172 0.445788 0.359489
542 1 0.938851 0.0682873 0.488342
487 1 0.193327 0.373452 0.428016
490 1 0.324144 0.42509 0.378469
486 1 0.189241 0.433775 0.377087
489 1 0.24906 0.385354 0.369934
492 1 0.242173 0.44208 0.445249
491 1 0.30298 0.362211 0.435965
259 1 0.0751918 0.0118994 0.316801
590 1 0.435694 0.312226 0.498352
494 1 0.436055 0.433976 0.368849
495 1 0.462663 0.36695 0.430359
497 1 0.510472 0.380108 0.36046
496 1 0.368352 0.427843 0.439115
493 1 0.38457 0.37103 0.378788
501 1 0.630501 0.37889 0.365235
504 1 0.627284 0.440439 0.431882
500 1 0.51759 0.432052 0.438886
498 1 0.569345 0.451105 0.364118
499 1 0.571237 0.372667 0.436921
626 1 0.566143 0.446515 0.498248
545 1 0.00376218 0.121892 0.497779
285 1 0.876943 0.00418521 0.253792
1157 1 0.139656 0.497046 0.126973
385 1 0.0133585 0.00778122 0.372984
1409 1 0.995006 0.491869 0.37633
110 1 0.421707 0.444431 0.00793448
57 1 0.75032 0.13106 0.00662346
593 1 0.5067 0.24472 0.498846
565 1 0.630598 0.119456 0.498211
585 1 0.26582 0.241364 0.499759
25 1 0.753499 0.00893035 0.00308082
637 1 0.880594 0.360539 0.493989
558 1 0.439652 0.171778 0.496556
41 1 0.250472 0.126748 0.00466704
586 1 0.321745 0.30241 0.492484
105 1 0.265927 0.358039 0.00199529
121 1 0.748968 0.377222 0.00675747
602 1 0.805627 0.308463 0.497316
617 1 0.246973 0.361643 0.498163
520 1 0.119484 0.0721829 0.568929
547 1 0.0567147 0.120422 0.5675
642 1 0.0605527 0.0658549 0.627865
677 1 0.126463 0.135005 0.629719
516 1 -0.000793228 0.0511352 0.567637
98 1 0.0715222 0.448062 0.988917
646 1 0.195074 0.0655452 0.639861
524 1 0.254553 0.0688769 0.568231
551 1 0.192697 0.130853 0.569312
555 1 0.315257 0.119302 0.571498
650 1 0.326255 0.0668198 0.633068
681 1 0.257499 0.121413 0.62293
90 1 0.815844 0.30733 0.978163
528 1 0.379832 0.0697787 0.567435
559 1 0.444851 0.118971 0.571744
654 1 0.445887 0.056591 0.631284
685 1 0.389673 0.122225 0.625994
689 1 0.505996 0.116677 0.628854
532 1 0.501425 0.0619956 0.562994
536 1 0.623991 0.0552943 0.563254
563 1 0.565017 0.125033 0.561884
658 1 0.564152 0.0570042 0.630851
693 1 0.632805 0.115808 0.625878
18 1 0.551511 0.0590333 0.99471
5 1 0.130084 0.00953429 0.998606
671 1 0.931796 0.00305215 0.684179
614 1 0.19016 0.42383 0.515139
37 1 0.142579 0.121907 0.992345
540 1 0.756679 0.0708244 0.57039
567 1 0.677666 0.128052 0.55741
571 1 0.81729 0.125708 0.570236
662 1 0.691666 0.0608531 0.614085
666 1 0.810194 0.0663205 0.632979
697 1 0.747567 0.132373 0.62061
550 1 0.179995 0.171365 0.501344
673 1 0.998402 0.11608 0.624756
544 1 0.873249 0.0706477 0.567657
575 1 0.940873 0.122891 0.563819
670 1 0.92993 0.0599448 0.63007
701 1 0.889081 0.131842 0.623885
1689 1 0.736807 0.495821 0.636098
663 1 0.680879 0.00694437 0.683811
669 1 0.872443 0.00734611 0.626887
674 1 0.0554049 0.179878 0.624694
552 1 0.136652 0.184411 0.571996
579 1 0.0542564 0.239734 0.565753
584 1 0.113514 0.292945 0.573478
709 1 0.123614 0.242114 0.638961
706 1 0.0588879 0.310987 0.633783
580 1 0.995622 0.308554 0.558266
682 1 0.320274 0.18103 0.631023
678 1 0.200872 0.189542 0.624156
556 1 0.263569 0.181278 0.562752
583 1 0.197289 0.238821 0.549396
710 1 0.182885 0.295604 0.621175
713 1 0.245672 0.248958 0.617187
714 1 0.332149 0.303865 0.627787
587 1 0.315891 0.245045 0.565039
588 1 0.2565 0.29963 0.560426
577 1 0.00642403 0.246107 0.499577
522 1 0.309199 0.0694449 0.503449
686 1 0.445319 0.180423 0.630209
560 1 0.372853 0.174806 0.561585
591 1 0.428157 0.237125 0.556301
592 1 0.376767 0.302067 0.554258
717 1 0.377223 0.23427 0.624793
718 1 0.442728 0.295625 0.625064
721 1 0.502789 0.242329 0.632488
564 1 0.50846 0.184832 0.554721
596 1 0.518283 0.306729 0.563472
568 1 0.620147 0.187872 0.569561
595 1 0.55531 0.242525 0.57076
600 1 0.624536 0.31152 0.555745
690 1 0.558964 0.182211 0.632972
722 1 0.562203 0.308242 0.633951
725 1 0.631229 0.251034 0.630584
1565 1 0.875942 0.479427 0.499035
33 1 0.99181 0.132856 0.999343
526 1 0.436278 0.0636613 0.517299
1547 1 0.321973 0.495016 0.556104
694 1 0.687564 0.190329 0.626284
572 1 0.747246 0.196711 0.568958
599 1 0.684709 0.253102 0.554427
604 1 0.749972 0.308596 0.561346
729 1 0.736712 0.256784 0.631543
726 1 0.676773 0.32416 0.625437
698 1 0.802341 0.198614 0.638934
603 1 0.816013 0.251538 0.569909
730 1 0.811119 0.319315 0.629385
1947 1 0.827035 0.495085 0.934463
661 1 0.626559 0.000484791 0.627034
1793 1 0.996006 0.484571 0.754541
548 1 0.0042625 0.185055 0.564697
705 1 0.00795119 0.247313 0.633949
576 1 0.880031 0.183654 0.562893
607 1 0.945213 0.250019 0.574366
608 1 0.878752 0.304551 0.573346
702 1 0.945816 0.188006 0.634644
733 1 0.87456 0.241515 0.638212
734 1 0.939441 0.319223 0.637954
582 1 0.191731 0.305239 0.497595
114 1 0.57076 0.449494 0.997252
569 1 0.753676 0.125724 0.51479
611 1 0.0589659 0.371155 0.552918
616 1 0.110959 0.43862 0.561642
738 1 0.0591427 0.421152 0.63104
741 1 0.126098 0.366226 0.609
737 1 0.00519757 0.364963 0.617639
42 1 0.308816 0.189912 1.00184
1801 1 0.241194 0.491972 0.752468
549 1 0.124714 0.123612 0.504035
638 1 0.9427 0.423314 0.500605
1795 1 0.0798952 0.50077 0.806352
615 1 0.189443 0.350305 0.561701
619 1 0.32632 0.365718 0.556359
620 1 0.261696 0.425858 0.555777
742 1 0.200024 0.431295 0.618819
745 1 0.262514 0.358467 0.615278
746 1 0.321881 0.423414 0.609587
62 1 0.936565 0.194836 0.995879
927 1 0.927834 0.0050196 0.930338
647 1 0.194767 0.00308285 0.686708
623 1 0.446301 0.356272 0.575178
624 1 0.384693 0.429706 0.558568
749 1 0.388014 0.368198 0.63455
750 1 0.427655 0.443877 0.637205
789 1 0.633119 0.0113925 0.743067
118 1 0.693371 0.433829 0.99837
793 1 0.746829 0.00912197 0.749596
530 1 0.560277 0.0653703 0.502939
541 1 0.87264 0.000483334 0.499698
628 1 0.479452 0.42871 0.552224
753 1 0.508283 0.371353 0.616624
627 1 0.5725 0.371505 0.560323
632 1 0.618899 0.441187 0.566306
754 1 0.536276 0.447316 0.603302
757 1 0.620296 0.387388 0.620955
625 1 0.505561 0.370067 0.509726
631 1 0.685935 0.370053 0.560614
636 1 0.742775 0.431522 0.56053
758 1 0.668282 0.441908 0.630658
761 1 0.744234 0.378059 0.613696
762 1 0.803772 0.438264 0.626331
635 1 0.829306 0.362481 0.563769
919 1 0.686683 0.00288297 0.948113
897 1 1.00126 0.0152432 0.867365
1671 1 0.184022 0.484898 0.672949
1665 1 0.988401 0.491262 0.650882
612 1 0.993274 0.431294 0.570367
639 1 0.93155 0.362352 0.560425
640 1 0.881995 0.430557 0.558822
765 1 0.879353 0.373671 0.623683
766 1 0.930571 0.434037 0.639399
1021 1 0.863251 0.380186 0.875939
46 1 0.437599 0.182515 0.994737
675 1 0.0695834 0.123738 0.68783
648 1 0.12634 0.0582919 0.683276
770 1 0.0648734 0.0546171 0.761085
805 1 0.111493 0.125689 0.763015
776 1 0.130526 0.0643865 0.817327
803 1 0.0582228 0.124866 0.821419
518 1 0.189343 0.0678568 0.522961
772 1 0.999197 0.0728225 0.809949
613 1 0.12934 0.372237 0.509516
774 1 0.205574 0.0581839 0.746462
679 1 0.176286 0.13379 0.694977
807 1 0.187328 0.128922 0.800503
652 1 0.263206 0.0536113 0.693804
683 1 0.319462 0.132762 0.691907
778 1 0.310093 0.0664434 0.750302
780 1 0.251672 0.0680278 0.818581
809 1 0.257045 0.130103 0.751678
811 1 0.308953 0.129778 0.815355
1927 1 0.177492 0.490945 0.944433
687 1 0.437809 0.135074 0.701602
656 1 0.381683 0.0572173 0.691041
782 1 0.445393 0.0600293 0.763184
784 1 0.365069 0.0587097 0.810888
813 1 0.379319 0.12349 0.756901
815 1 0.443808 0.121919 0.814987
788 1 0.506568 0.0555537 0.820425
923 1 0.807105 0.0107831 0.940682
660 1 0.501312 0.0614201 0.690428
817 1 0.502402 0.128513 0.757558
664 1 0.618135 0.0663291 0.690149
691 1 0.56004 0.125495 0.691233
786 1 0.56466 0.068407 0.755709
819 1 0.560328 0.122455 0.815474
821 1 0.626228 0.125539 0.762982
792 1 0.620942 0.0691399 0.816997
539 1 0.815726 0.00343228 0.561163
1019 1 0.801515 0.381963 0.942037
1024 1 0.890191 0.442669 0.938776
1929 1 0.250897 0.500074 0.883178
695 1 0.69099 0.119127 0.697062
699 1 0.803814 0.134229 0.681247
790 1 0.688727 0.0629733 0.751189
668 1 0.743568 0.0677211 0.67635
794 1 0.808233 0.0585816 0.750065
796 1 0.750273 0.063013 0.80975
823 1 0.683157 0.124458 0.813009
825 1 0.74964 0.121352 0.742366
827 1 0.80306 0.130363 0.813486
529 1 0.501183 0.00648414 0.506452
1667 1 0.0702545 0.491452 0.701943
801 1 0.017878 0.12229 0.751484
644 1 0.00780911 0.066073 0.700848
672 1 0.870123 0.0748916 0.695052
703 1 0.94919 0.115635 0.707172
798 1 0.94061 0.0514792 0.753121
800 1 0.86633 0.0578959 0.811779
829 1 0.868083 0.124995 0.7665
831 1 0.940735 0.125737 0.813735
707 1 0.0669627 0.25393 0.688929
712 1 0.123995 0.319381 0.677755
808 1 0.133606 0.193717 0.81744
680 1 0.126292 0.191531 0.697509
802 1 0.0580881 0.194035 0.756614
834 1 0.0539715 0.30961 0.754408
835 1 0.0702148 0.251113 0.81234
837 1 0.122527 0.258933 0.750769
840 1 0.125237 0.310903 0.824252
716 1 0.270661 0.320983 0.684817
715 1 0.313564 0.241916 0.690276
839 1 0.185474 0.259669 0.829304
684 1 0.251713 0.202551 0.681179
711 1 0.18955 0.253114 0.689188
806 1 0.189343 0.198419 0.745919
810 1 0.320624 0.191992 0.753735
838 1 0.190592 0.313895 0.744657
841 1 0.250874 0.243018 0.761431
842 1 0.31399 0.295308 0.746015
844 1 0.254736 0.310935 0.808023
843 1 0.307189 0.253893 0.811603
812 1 0.247307 0.187653 0.814995
720 1 0.383342 0.306655 0.690439
688 1 0.378867 0.195841 0.698353
719 1 0.453679 0.25657 0.69955
814 1 0.452301 0.194312 0.760686
816 1 0.376947 0.178423 0.827594
845 1 0.387651 0.249562 0.751261
846 1 0.440361 0.315926 0.749525
848 1 0.379811 0.315076 0.802185
847 1 0.43264 0.251706 0.817377
724 1 0.505445 0.308845 0.68375
852 1 0.500485 0.306224 0.817565
820 1 0.49086 0.194048 0.829385
692 1 0.499192 0.179909 0.686996
849 1 0.51071 0.245278 0.763146
818 1 0.570153 0.182568 0.741173
696 1 0.641398 0.182927 0.685012
723 1 0.567049 0.250101 0.692472
824 1 0.628044 0.196963 0.823274
853 1 0.634362 0.239732 0.758002
856 1 0.621285 0.320019 0.814337
850 1 0.55851 0.306778 0.74992
851 1 0.566694 0.256725 0.825507
728 1 0.632984 0.323653 0.692285
822 1 0.68885 0.184818 0.757259
700 1 0.743836 0.191384 0.697881
826 1 0.818976 0.184078 0.753628
727 1 0.686144 0.258258 0.689294
731 1 0.815606 0.247323 0.702101
854 1 0.675718 0.307933 0.752753
857 1 0.746206 0.258693 0.746633
859 1 0.802904 0.261844 0.8141
860 1 0.73886 0.321248 0.826298
828 1 0.749299 0.198793 0.817685
855 1 0.69081 0.25198 0.814816
858 1 0.799329 0.320465 0.762513
732 1 0.7399 0.32227 0.693956
708 1 0.998825 0.306306 0.69385
804 1 0.996802 0.181345 0.8063
836 1 0.996772 0.311601 0.818859
676 1 0.00684498 0.181565 0.692723
833 1 -0.000332978 0.259737 0.760563
735 1 0.944316 0.251927 0.7048
830 1 0.937192 0.18658 0.755637
704 1 0.871334 0.181069 0.698997
736 1 0.872321 0.318292 0.682439
832 1 0.866169 0.185238 0.810259
861 1 0.877688 0.25643 0.744652
864 1 0.870367 0.324343 0.817676
863 1 0.924774 0.252404 0.803592
862 1 0.937565 0.3101 0.753295
34 1 0.0817242 0.180423 0.996098
739 1 0.0514445 0.373116 0.701635
744 1 0.135686 0.404834 0.689589
866 1 0.0677881 0.4354 0.757566
869 1 0.127634 0.364291 0.755621
872 1 0.136041 0.439361 0.798672
867 1 0.0757071 0.377875 0.810963
865 1 0.995713 0.373477 0.768842
1543 1 0.187994 0.484944 0.56052
1022 1 0.937146 0.428828 0.861333
899 1 0.0552067 -0.00136842 0.929307
633 1 0.75307 0.379665 0.504484
743 1 0.19695 0.361827 0.663307
747 1 0.310986 0.38961 0.685953
748 1 0.247876 0.439201 0.693589
870 1 0.18863 0.425039 0.746142
871 1 0.195549 0.363326 0.808324
873 1 0.250941 0.372209 0.749786
874 1 0.307901 0.439774 0.753559
875 1 0.321365 0.369122 0.805342
876 1 0.245957 0.428628 0.815104
1799 1 0.197322 0.496355 0.823153
109 1 0.377572 0.375775 1.00226
797 1 0.864488 0.00739925 0.740892
751 1 0.453706 0.370432 0.691569
752 1 0.374416 0.443539 0.701449
877 1 0.371672 0.371114 0.741139
878 1 0.449601 0.436262 0.735531
879 1 0.434897 0.37147 0.802382
880 1 0.38348 0.438485 0.808559
756 1 0.510034 0.43142 0.682118
1017 1 0.765587 0.386833 0.870317
881 1 0.510223 0.372983 0.748723
884 1 0.495834 0.429418 0.806619
755 1 0.563993 0.377886 0.681078
760 1 0.611572 0.446979 0.694808
882 1 0.55866 0.439022 0.751724
883 1 0.554917 0.375676 0.828504
885 1 0.617291 0.381724 0.757394
888 1 0.615151 0.440755 0.812042
887 1 0.68787 0.380674 0.816935
759 1 0.673878 0.391367 0.685545
763 1 0.797642 0.370948 0.689143
764 1 0.741408 0.435976 0.697203
886 1 0.67265 0.436283 0.752501
889 1 0.735456 0.374061 0.750919
890 1 0.794701 0.423626 0.745325
892 1 0.7459 0.440172 0.802061
891 1 0.809663 0.377387 0.809343
125 1 0.874302 0.367578 0.990146
740 1 0.995782 0.426671 0.690559
868 1 0.00494423 0.434499 0.811835
767 1 0.929546 0.367931 0.700073
768 1 0.858297 0.434147 0.685045
893 1 0.860196 0.378294 0.74083
894 1 0.924764 0.430266 0.748432
895 1 0.925601 0.374401 0.81032
896 1 0.858918 0.439914 0.810478
1020 1 0.752309 0.446778 0.931014
643 1 0.0672311 0.00676494 0.701434
1675 1 0.317221 0.484658 0.663423
898 1 0.0613656 0.0630173 0.868226
904 1 0.123655 0.0751259 0.921774
931 1 0.0694937 0.135335 0.930972
933 1 0.138094 0.135103 0.870813
929 1 0.00236131 0.130199 0.877928
900 1 0.994383 0.0749211 0.942648
523 1 0.317054 0.014835 0.565368
795 1 0.80383 0.0104227 0.821815
902 1 0.191486 0.0644111 0.882345
908 1 0.260042 0.0628228 0.941534
935 1 0.197159 0.130472 0.930539
937 1 0.25864 0.125522 0.876892
939 1 0.311682 0.122772 0.93632
906 1 0.309172 0.061501 0.867961
657 1 0.503991 0.00918264 0.628989
1817 1 0.74529 0.500766 0.74829
26 1 0.812734 0.0634912 0.996121
535 1 0.689673 0.00759371 0.555853
910 1 0.443395 0.0481133 0.869012
912 1 0.384922 0.0527096 0.936597
941 1 0.379162 0.112514 0.877704
943 1 0.437362 0.112478 0.95304
945 1 0.496657 0.123595 0.880795
916 1 0.503603 0.0624516 0.933307
925 1 0.854719 0.00452411 0.878801
1551 1 0.427879 0.490271 0.565312
1023 1 0.939769 0.374638 0.925577
93 1 0.874642 0.254823 0.987877
1925 1 0.13666 0.497285 0.870083
914 1 0.561651 0.0579226 0.876146
920 1 0.615684 0.0519085 0.935807
947 1 0.559551 0.123682 0.931625
949 1 0.616019 0.124332 0.87368
1537 1 0.997004 0.495193 0.505349
73 1 0.252002 0.248931 0.996833
573 1 0.884574 0.122166 0.502371
1005 1 0.372345 0.380741 0.870639
1669 1 0.115979 0.493673 0.636089
1016 1 0.631721 0.444565 0.939791
1009 1 0.488395 0.368177 0.87686
918 1 0.676665 0.0653261 0.884339
924 1 0.739446 0.0646824 0.937612
951 1 0.677455 0.129606 0.938196
953 1 0.741824 0.128242 0.879599
922 1 0.807514 0.0727506 0.876135
955 1 0.804646 0.12974 0.943862
22 1 0.675882 0.065562 0.997967
1695 1 0.927657 0.491638 0.700228
1014 1 0.689265 0.444283 0.861571
1006 1 0.438934 0.450173 0.862196
781 1 0.371686 0.00500199 0.750594
94 1 0.942767 0.308913 0.993551
926 1 0.92827 0.0655167 0.877046
928 1 0.865281 0.0710357 0.929173
957 1 0.868745 0.12817 0.872464
959 1 0.938038 0.133409 0.924102
1545 1 0.256376 0.497637 0.503224
659 1 0.561049 0.00831303 0.691219
930 1 0.0543391 0.20128 0.865714
936 1 0.132334 0.188641 0.939134
963 1 0.0688132 0.254779 0.941527
965 1 0.120503 0.257652 0.886449
968 1 0.129665 0.322635 0.937982
962 1 0.051998 0.307924 0.880302
932 1 0.997741 0.198185 0.941885
964 1 0.997716 0.312824 0.939553
1567 1 0.938794 0.487399 0.561508
966 1 0.191549 0.319672 0.88808
934 1 0.197489 0.191123 0.880979
938 1 0.311915 0.195425 0.875408
940 1 0.259947 0.18993 0.941708
967 1 0.191033 0.253178 0.93274
969 1 0.251958 0.254364 0.872047
970 1 0.32039 0.311859 0.862201
971 1 0.31365 0.25703 0.938101
972 1 0.258339 0.308182 0.938376
533 1 0.6129 0.00353819 0.506633
2 1 0.0647357 0.0502504 0.99087
976 1 0.363557 0.321856 0.938613
973 1 0.375356 0.250306 0.864665
944 1 0.368819 0.179409 0.9388
942 1 0.441374 0.175044 0.895025
975 1 0.43699 0.253035 0.937368
974 1 0.431345 0.310733 0.875805
948 1 0.503838 0.197166 0.943427
977 1 0.497884 0.254112 0.885279
1012 1 0.499641 0.460274 0.935019
1941 1 0.627182 0.497764 0.869112
980 1 0.50252 0.314056 0.931585
946 1 0.555033 0.1845 0.876698
984 1 0.626371 0.306688 0.946601
978 1 0.566051 0.319889 0.879251
981 1 0.617845 0.256429 0.89202
979 1 0.560126 0.250232 0.942411
952 1 0.619104 0.183156 0.935559
1008 1 0.385711 0.438677 0.931999
1011 1 0.559626 0.380224 0.940523
983 1 0.681466 0.235702 0.9456
988 1 0.740978 0.307487 0.941992
954 1 0.810894 0.192618 0.886828
982 1 0.682331 0.309511 0.887534
986 1 0.799991 0.301807 0.889284
987 1 0.808134 0.24226 0.942136
985 1 0.737115 0.245808 0.880955
950 1 0.687989 0.186515 0.876156
956 1 0.742123 0.187264 0.948549
961 1 0.988083 0.244268 0.851732
960 1 0.875745 0.190876 0.936923
992 1 0.882739 0.311569 0.935379
991 1 0.939487 0.247349 0.929848
990 1 0.932192 0.312302 0.868165
989 1 0.868313 0.255012 0.879209
958 1 0.927933 0.191616 0.863856
1010 1 0.556412 0.442261 0.874649
570 1 0.813858 0.176244 0.500264
66 1 0.0698777 0.327025 1.00156
993 1 0.99814 0.373222 0.870495
996 1 0.00811803 0.457013 0.922
995 1 0.0698222 0.377218 0.933566
1000 1 0.128727 0.428116 0.938659
994 1 0.0785669 0.439214 0.869247
997 1 0.13216 0.373596 0.872516
1015 1 0.691528 0.379211 0.931262
117 1 0.625398 0.381386 0.992024
998 1 0.186941 0.432743 0.880603
1018 1 0.81509 0.449319 0.877918
999 1 0.200754 0.378231 0.94332
553 1 0.24495 0.124791 0.512439
1003 1 0.314347 0.378686 0.927802
1004 1 0.253811 0.440098 0.940141
1001 1 0.255648 0.366124 0.875598
1002 1 0.330136 0.451289 0.872533
1013 1 0.626954 0.383265 0.876812
665 1 0.739597 0.00308836 0.626055
1007 1 0.439974 0.375165 0.932601
667 1 0.798982 0.00391006 0.689776
651 1 0.32286 0.00444817 0.683585
581 1 0.120313 0.236194 0.519289
61 1 0.869894 0.124039 0.984871
69 1 0.132798 0.24778 0.991381
769 1 0.00247514 0.00606499 0.756186
641 1 0.994021 0.00782668 0.636512
921 1 0.739977 0.0133991 0.873053
645 1 0.125911 0.0133496 0.62217
609 1 0.993693 0.368105 0.499501
534 1 0.694205 0.0644564 0.508526
634 1 0.802983 0.442205 0.511319
537 1 0.755935 0.00980972 0.497739
1687 1 0.670713 0.497372 0.68666
58 1 0.821456 0.18816 0.995762
543 1 0.933483 0.00718876 0.565534
515 1 0.0687268 0.00211202 0.558718
81 1 0.49411 0.259264 0.995162
605 1 0.871823 0.238154 0.507667
1539 1 0.0479169 0.493943 0.591345
787 1 0.571082 0.000748198 0.811635
6 1 0.198467 0.0532124 0.998825
54 1 0.675001 0.178463 0.999837
1673 1 0.248195 0.492862 0.621208
1823 1 0.927538 0.491391 0.808734
649 1 0.250452 0.00714321 0.624116
30 1 0.933437 0.0523442 0.996387
653 1 0.37895 0.00997412 0.61564
113 1 0.502067 0.376188 0.994431
561 1 0.498244 0.119812 0.507348
70 1 0.186391 0.308723 0.997168
597 1 0.61723 0.247419 0.502972
557 1 0.370899 0.124992 0.50278
1685 1 0.589002 0.497343 0.63002
1033 1 0.244702 0.498192 0.995414
799 1 0.929589 0.0045761 0.830962
773 1 0.129405 0.00972938 0.757891
531 1 0.564092 0.00327346 0.569308
917 1 0.630032 0.00269852 0.872503
78 1 0.439115 0.321842 0.996373
97 1 0.00461453 0.374074 0.998675
53 1 0.61427 0.117742 0.997112
86 1 0.682292 0.317677 0.996064
1557 1 0.62951 0.487546 0.503293
29 1 0.866811 0.00194934 0.987609
574 1 0.944124 0.188366 0.502859
1541 1 0.119126 0.497553 0.504977
106 1 0.305832 0.428343 0.993804
77 1 0.375994 0.239108 0.993437
517 1 0.137691 0.000757272 0.511744
629 1 0.637312 0.369592 0.500184
1032 1 0.133235 0.561034 0.0650331
1059 1 0.0586676 0.637557 0.0726162
1154 1 0.0739497 0.580621 0.127382
1189 1 0.131222 0.62961 0.121198
1550 1 0.439669 0.54319 0.492639
513 1 1.00283 0.998181 0.501329
1114 1 0.817906 0.815522 0.00978496
269 1 0.373546 0.995092 0.256814
1036 1 0.241128 0.560142 0.0493492
1063 1 0.191115 0.630051 0.0563151
1158 1 0.186827 0.57201 0.127304
1162 1 0.300596 0.561395 0.123536
1193 1 0.253364 0.637328 0.126995
1067 1 0.301713 0.612293 0.0517501
1625 1 0.76096 0.75115 0.488216
1295 1 0.422838 0.507177 0.316779
1040 1 0.375127 0.543647 0.0710112
1071 1 0.437419 0.619215 0.0694525
1166 1 0.433408 0.560372 0.130668
1197 1 0.370957 0.622012 0.124942
1201 1 0.494322 0.631289 0.12229
1505 1 0.000407664 0.883093 0.378181
1175 1 0.687789 0.504505 0.178251
1055 1 0.950092 0.507398 0.0623324
1044 1 0.495512 0.563475 0.0650434
1048 1 0.63466 0.569894 0.0551154
1075 1 0.562129 0.612496 0.0498027
1170 1 0.573004 0.562863 0.116387
1205 1 0.629269 0.630995 0.117921
1411 1 0.0612997 0.498157 0.442046
151 1 0.687651 0.990553 0.16032
1605 1 0.104331 0.760778 0.493799
1050 1 0.819265 0.572087 0.0114147
139 1 0.322415 0.998076 0.194765
1079 1 0.687893 0.634514 0.0755269
1052 1 0.753471 0.572607 0.0574249
1083 1 0.813201 0.628993 0.0736585
1174 1 0.687942 0.566189 0.126013
1178 1 0.813546 0.553115 0.120907
1209 1 0.751518 0.624505 0.126565
1641 1 0.245393 0.878659 0.492431
415 1 0.942326 0.998241 0.442503
1028 1 0.00816142 0.565529 0.0751015
1185 1 0.00180562 0.631734 0.126592
1056 1 0.879923 0.565408 0.0750482
1087 1 0.951882 0.632446 0.0645866
1182 1 0.942712 0.568383 0.124745
1213 1 0.885437 0.634596 0.122144
413 1 0.886334 0.99818 0.376267
1163 1 0.300421 0.508392 0.18435
1106 1 0.557258 0.820564 0.00666078
1598 1 0.93495 0.690689 0.495428
1064 1 0.127839 0.693499 0.0685189
1091 1 0.0699725 0.764429 0.0740933
1186 1 0.0614845 0.698755 0.128874
1221 1 0.129525 0.746583 0.133732
1096 1 0.136137 0.808968 0.0632818
1218 1 0.0523898 0.826014 0.124584
1060 1 0.995243 0.701652 0.0718728
1092 1 0.995025 0.822291 0.0655567
1281 1 0.992782 0.502444 0.244237
1194 1 0.315522 0.701094 0.130761
1068 1 0.245301 0.689668 0.0684342
1095 1 0.195861 0.755594 0.0599096
1099 1 0.308075 0.751091 0.0679935
1190 1 0.190031 0.683934 0.121778
1222 1 0.194195 0.82527 0.127545
1225 1 0.25113 0.751185 0.122934
1100 1 0.254734 0.810956 0.0617377
1226 1 0.305526 0.808524 0.125233
1601 1 0.00648317 0.750225 0.497759
1072 1 0.361005 0.679988 0.0676568
1103 1 0.442766 0.745477 0.0579489
1104 1 0.372086 0.824643 0.0675941
1198 1 0.427271 0.679658 0.120866
1229 1 0.368399 0.760499 0.119022
1230 1 0.446162 0.806941 0.130616
1076 1 0.485598 0.68114 0.0617381
1233 1 0.507083 0.75477 0.12522
1108 1 0.498319 0.814404 0.0755306
27 1 0.815712 0.999977 0.0587262
1080 1 0.6163 0.669915 0.0458205
1107 1 0.550679 0.754196 0.0576603
1202 1 0.564899 0.678069 0.123355
1234 1 0.569116 0.813227 0.129211
1237 1 0.619634 0.749381 0.120215
1112 1 0.625733 0.804603 0.0626961
1653 1 0.630191 0.865702 0.498885
1084 1 0.756306 0.673925 0.0606281
1206 1 0.680165 0.698047 0.122837
1111 1 0.687163 0.739551 0.0548369
1115 1 0.816287 0.747896 0.071034
1116 1 0.743666 0.818373 0.0639308
1210 1 0.810691 0.693285 0.133089
1238 1 0.691879 0.809235 0.122353
1241 1 0.746861 0.750536 0.129485
1242 1 0.81633 0.813066 0.104476
1590 1 0.702385 0.693742 0.495736
1217 1 0.00655718 0.756247 0.131706
1214 1 0.939987 0.691333 0.124311
1088 1 0.871121 0.694066 0.0678465
1119 1 0.937299 0.752056 0.0664051
1120 1 0.886809 0.807588 0.0580872
1245 1 0.879082 0.750244 0.136912
1246 1 0.939288 0.80936 0.126067
1070 1 0.42958 0.686464 0.00650336
1123 1 0.0596838 0.887568 0.0663159
1128 1 0.122979 0.944939 0.0572074
1250 1 0.0666657 0.943325 0.12586
1253 1 0.121937 0.874647 0.114281
1047 1 0.68657 0.505201 0.0554006
1124 1 0.0031593 0.93658 0.0666966
287 1 0.937224 0.994572 0.306754
1536 1 0.869799 0.951279 0.430329
133 1 0.122048 0.996789 0.136105
1586 1 0.570195 0.707681 0.499556
1027 1 0.0640324 0.513958 0.0707043
1127 1 0.190401 0.87645 0.0657779
1131 1 0.31179 0.874753 0.0635236
1132 1 0.245847 0.935893 0.0728158
1254 1 0.182995 0.940973 0.129101
1257 1 0.258976 0.876393 0.13629
1258 1 0.32436 0.930958 0.132833
149 1 0.616507 0.992052 0.104045
1135 1 0.437595 0.867413 0.0654568
1136 1 0.365456 0.931483 0.0513803
1261 1 0.37006 0.866144 0.134021
1262 1 0.434643 0.939427 0.126459
1299 1 0.563008 0.501009 0.30015
281 1 0.745122 1.00039 0.26209
129 1 1.0006 0.993881 0.137773
1140 1 0.49759 0.929771 0.0649851
1265 1 0.498347 0.873021 0.13379
1139 1 0.552353 0.869766 0.0739318
1144 1 0.614542 0.931442 0.0570055
1266 1 0.565693 0.923317 0.125338
1269 1 0.626279 0.866736 0.117354
1549 1 0.377392 0.495945 0.497437
1057 1 0.0100486 0.643233 0.000380593
1143 1 0.684961 0.873324 0.0537199
1148 1 0.752381 0.936153 0.0626681
1270 1 0.676906 0.92777 0.105346
1273 1 0.742616 0.874345 0.125777
1274 1 0.81732 0.94831 0.132697
1147 1 0.808221 0.879711 0.0679622
1293 1 0.381731 0.502761 0.247338
1249 1 0.989633 0.889199 0.12485
1029 1 0.121273 0.501545 0.00901012
1618 1 0.557474 0.805777 0.496329
1151 1 0.929835 0.88225 0.0652406
1152 1 0.876235 0.952498 0.071984
1277 1 0.866018 0.877353 0.124521
1278 1 0.93645 0.940872 0.128817
1533 1 0.886348 0.891964 0.372833
1160 1 0.124317 0.55187 0.18899
1187 1 0.0596389 0.62767 0.190236
1282 1 0.0554381 0.552936 0.246384
1288 1 0.108349 0.57531 0.318683
1315 1 0.0401748 0.615772 0.323041
1317 1 0.120263 0.636554 0.248692
1156 1 0.999082 0.576276 0.195346
1191 1 0.177629 0.634601 0.190221
1319 1 0.176227 0.631448 0.306555
1164 1 0.239083 0.567913 0.19813
1195 1 0.314153 0.623003 0.180706
1286 1 0.169737 0.561683 0.265295
1292 1 0.237911 0.562719 0.306394
1321 1 0.244819 0.634852 0.250765
1323 1 0.305677 0.623025 0.301422
1290 1 0.306611 0.559565 0.250778
271 1 0.425484 0.985969 0.309516
1413 1 0.137925 0.501129 0.385215
1291 1 0.318934 0.510607 0.307187
1168 1 0.36209 0.565308 0.187143
1199 1 0.426877 0.623958 0.188477
1294 1 0.436035 0.559093 0.250092
1296 1 0.370406 0.570188 0.320039
1325 1 0.373233 0.620369 0.24623
1327 1 0.434114 0.622123 0.305384
1329 1 0.49739 0.621183 0.245113
1172 1 0.497511 0.561766 0.18752
1300 1 0.495549 0.560016 0.299613
137 1 0.251871 0.993853 0.127691
1102 1 0.443431 0.81075 0.00129525
1514 1 0.315598 0.927065 0.38465
1203 1 0.552921 0.619137 0.182589
1176 1 0.62598 0.568962 0.189452
1298 1 0.560023 0.565583 0.236581
1331 1 0.560577 0.624349 0.306652
1333 1 0.616072 0.623112 0.251498
1304 1 0.622794 0.553174 0.304471
1602 1 0.0522865 0.822853 0.486641
1511 1 0.181348 0.875148 0.433815
1303 1 0.697441 0.515133 0.315075
1510 1 0.182922 0.927564 0.370463
1153 1 0.00159339 0.510011 0.132246
1302 1 0.69164 0.571547 0.243349
1180 1 0.748706 0.556252 0.190707
1207 1 0.689478 0.620189 0.189493
1211 1 0.805694 0.6306 0.183435
1308 1 0.775273 0.56337 0.301551
1335 1 0.692102 0.615278 0.306401
1337 1 0.749545 0.63156 0.245447
1339 1 0.819866 0.635292 0.309812
1306 1 0.811308 0.55803 0.231348
1513 1 0.244071 0.864521 0.38595
1171 1 0.566545 0.500389 0.181876
1621 1 0.631923 0.762306 0.488169
1313 1 0.994634 0.631765 0.246013
1284 1 0.995696 0.551982 0.311974
1184 1 0.875629 0.547982 0.185209
1215 1 0.933226 0.632645 0.183578
1310 1 0.936715 0.562159 0.238991
1312 1 0.865766 0.561073 0.299948
1341 1 0.869597 0.620156 0.243452
1343 1 0.917332 0.619389 0.305895
1110 1 0.674344 0.813122 0.00331751
1219 1 0.0568602 0.751833 0.194018
1314 1 0.065473 0.693911 0.260349
1320 1 0.115569 0.687513 0.318674
1192 1 0.11853 0.692114 0.1844
1346 1 0.0543641 0.807616 0.255415
1349 1 0.127602 0.743393 0.256227
1347 1 0.0551803 0.74417 0.320359
1352 1 0.107359 0.802974 0.31769
1220 1 0.000596447 0.809414 0.188338
1224 1 0.116505 0.817024 0.180787
1196 1 0.250121 0.686047 0.187921
1324 1 0.259392 0.687834 0.310058
1223 1 0.193646 0.742251 0.191136
1228 1 0.25118 0.810718 0.181423
1227 1 0.313994 0.760278 0.197984
1318 1 0.188166 0.692449 0.251458
1322 1 0.319501 0.693092 0.262319
1350 1 0.194828 0.805546 0.253487
1353 1 0.249945 0.750372 0.261307
1355 1 0.310315 0.752825 0.317228
1354 1 0.305525 0.815172 0.263189
1351 1 0.187248 0.7546 0.309297
1356 1 0.249897 0.814006 0.319405
1232 1 0.379128 0.80414 0.194055
1328 1 0.381634 0.682087 0.307549
1200 1 0.373399 0.684068 0.19405
1231 1 0.43115 0.745856 0.179053
1326 1 0.447724 0.686786 0.248555
1357 1 0.38776 0.745944 0.255059
1358 1 0.441151 0.807455 0.245041
1359 1 0.444851 0.742305 0.313211
1360 1 0.38706 0.804325 0.302943
1204 1 0.494516 0.6813 0.190735
1332 1 0.493796 0.668262 0.314506
1361 1 0.495211 0.746843 0.237131
1364 1 0.506483 0.821925 0.306303
1236 1 0.506984 0.814988 0.182385
1208 1 0.627024 0.675718 0.187371
1240 1 0.643076 0.817108 0.185844
1235 1 0.569772 0.73934 0.179238
1330 1 0.558555 0.677535 0.237407
1336 1 0.625739 0.683716 0.308077
1363 1 0.553606 0.738302 0.306793
1365 1 0.616654 0.74081 0.24343
1368 1 0.629558 0.805763 0.314848
1362 1 0.555745 0.799449 0.236506
1338 1 0.818583 0.690248 0.250477
1212 1 0.747121 0.69403 0.192478
1334 1 0.680354 0.68186 0.245354
1239 1 0.686731 0.751977 0.184975
1244 1 0.749028 0.814725 0.190383
1366 1 0.686083 0.807072 0.256261
1367 1 0.701421 0.74206 0.304875
1369 1 0.754698 0.75545 0.253756
1370 1 0.809227 0.811373 0.258672
1371 1 0.822831 0.753225 0.318935
1340 1 0.762237 0.68408 0.304886
1243 1 0.814836 0.747858 0.192434
1372 1 0.753379 0.810408 0.320684
1348 1 1.0001 0.815635 0.314349
1188 1 0.00642665 0.694041 0.181044
1316 1 0.991819 0.685395 0.316389
1345 1 0.998703 0.74541 0.264242
1342 1 0.941162 0.693956 0.243705
1216 1 0.876939 0.684748 0.181306
1247 1 0.940871 0.747191 0.18817
1373 1 0.869395 0.751679 0.251497
1374 1 0.938376 0.808246 0.243504
1375 1 0.946689 0.747105 0.321212
1376 1 0.88743 0.814741 0.31907
1344 1 0.884929 0.695426 0.320544
1248 1 0.860768 0.81931 0.184841
1301 1 0.631698 0.506035 0.23618
1515 1 0.328513 0.859559 0.432246
1256 1 0.115988 0.927083 0.19205
1251 1 0.056614 0.872527 0.192329
1378 1 0.059712 0.943552 0.251805
1379 1 0.0548703 0.871643 0.320705
1381 1 0.104363 0.873459 0.257873
1384 1 0.128477 0.945254 0.314676
1377 1 0.999855 0.875273 0.253319
277 1 0.63119 0.986901 0.24403
1382 1 0.188505 0.935543 0.252037
1259 1 0.319568 0.870486 0.198392
1260 1 0.268826 0.936053 0.193895
1383 1 0.187897 0.864096 0.310039
1255 1 0.185078 0.87475 0.187069
1385 1 0.245109 0.872171 0.243107
1386 1 0.31854 0.928998 0.257499
1387 1 0.305869 0.869828 0.321704
1388 1 0.251641 0.927537 0.320532
7 1 0.198564 0.99612 0.0575
1530 1 0.80992 0.939724 0.369764
1043 1 0.562244 0.5123 0.0546134
1433 1 0.753229 0.505532 0.380009
1431 1 0.695405 0.508277 0.432965
1263 1 0.436833 0.881672 0.185239
1264 1 0.375752 0.93628 0.199638
1389 1 0.388833 0.872765 0.249295
1390 1 0.441112 0.940178 0.250485
1391 1 0.435147 0.867973 0.312681
1392 1 0.372969 0.923245 0.314112
1396 1 0.500718 0.92868 0.307549
1268 1 0.507309 0.944716 0.182893
15 1 0.426109 0.991152 0.0615466
393 1 0.240266 0.99118 0.372025
1169 1 0.512484 0.50253 0.119705
1393 1 0.501362 0.874548 0.246765
1272 1 0.625497 0.930828 0.168602
1267 1 0.568259 0.872729 0.181549
1394 1 0.566852 0.93386 0.239616
1395 1 0.579821 0.868334 0.300508
1397 1 0.629039 0.887544 0.238737
1400 1 0.627316 0.93338 0.307357
1137 1 0.497802 0.873823 0.0032286
1520 1 0.368146 0.933128 0.449441
1025 1 0.0158237 0.51308 0.00310311
1518 1 0.437449 0.930498 0.36552
131 1 0.0650039 1.00232 0.188958
1271 1 0.697443 0.869624 0.205361
1275 1 0.806957 0.876447 0.178995
1276 1 0.754934 0.933319 0.182547
1398 1 0.698634 0.941057 0.238672
1401 1 0.763326 0.87927 0.263544
1402 1 0.810581 0.929448 0.239313
1399 1 0.690096 0.874319 0.301068
1403 1 0.818765 0.867487 0.324803
1404 1 0.747818 0.938377 0.324636
1380 1 0.00366803 0.940854 0.317312
1523 1 0.564699 0.870109 0.446106
1252 1 0.00560034 0.929997 0.193947
1279 1 0.942155 0.874923 0.191388
1280 1 0.868857 0.943369 0.186003
1405 1 0.877198 0.867862 0.239851
1406 1 0.934559 0.930676 0.243732
1407 1 0.941966 0.880867 0.310797
1408 1 0.865163 0.934984 0.3126
1058 1 0.0693244 0.703718 0.0183677
1090 1 0.0559742 0.811251 0.00631994
1410 1 0.042862 0.558842 0.378389
1416 1 0.111456 0.567036 0.435008
1443 1 0.0426487 0.622204 0.449186
1445 1 0.117288 0.631849 0.37358
1516 1 0.244987 0.943954 0.430471
1414 1 0.175397 0.56856 0.361875
1447 1 0.177764 0.628261 0.440314
1449 1 0.242923 0.615297 0.371379
1451 1 0.309592 0.632092 0.432086
1420 1 0.231452 0.56416 0.434838
1418 1 0.302987 0.565798 0.372536
1528 1 0.622406 0.95027 0.449106
1519 1 0.419161 0.867605 0.442696
1069 1 0.371778 0.62248 0.0106544
1424 1 0.359625 0.57112 0.423186
1455 1 0.438769 0.617492 0.44561
1422 1 0.427495 0.56383 0.384787
1453 1 0.368762 0.631218 0.370317
1457 1 0.493041 0.616667 0.381092
1428 1 0.501729 0.559546 0.434977
1517 1 0.3723 0.857821 0.3719
13 1 0.378656 0.994098 -0.000898437
273 1 0.493627 0.998675 0.244443
1461 1 0.620828 0.625681 0.360262
1426 1 0.564159 0.57412 0.373229
1432 1 0.620649 0.559386 0.437669
1459 1 0.556569 0.621189 0.440926
1121 1 0.999149 0.881093 0.0101904
1522 1 0.563319 0.944453 0.368021
1535 1 0.925158 0.876444 0.438929
1532 1 0.768895 0.933039 0.440824
11 1 0.318685 0.988755 0.0723316
1525 1 0.629098 0.874014 0.368293
409 1 0.746377 0.99292 0.395857
1430 1 0.685249 0.571648 0.375767
1463 1 0.674916 0.623694 0.453933
1436 1 0.745899 0.573401 0.430938
1434 1 0.815125 0.561766 0.369813
1465 1 0.756727 0.624474 0.363823
1467 1 0.81923 0.624107 0.432056
1577 1 0.245904 0.6231 0.492816
135 1 0.185615 0.993839 0.190334
1441 1 0.983551 0.62484 0.381598
1412 1 0.993827 0.556068 0.440082
1471 1 0.929276 0.606929 0.432789
1469 1 0.879768 0.631488 0.371088
1440 1 0.872454 0.553947 0.448168
1438 1 0.933793 0.545717 0.371935
275 1 0.557671 0.992471 0.30573
1085 1 0.883255 0.636118 0.0169433
261 1 0.124433 0.99248 0.246341
1524 1 0.482088 0.931854 0.433937
1442 1 0.0502404 0.683779 0.382258
1475 1 0.0556208 0.755547 0.426243
1448 1 0.121797 0.692712 0.441504
1480 1 0.122575 0.817554 0.433904
1477 1 0.1219 0.745246 0.36833
1474 1 0.0565942 0.81947 0.374376
1444 1 0.997911 0.686139 0.439719
1038 1 0.432624 0.567 0.00907785
1529 1 0.756535 0.866175 0.37623
403 1 0.561492 0.996937 0.438605
1446 1 0.19177 0.685558 0.364184
1484 1 0.242112 0.802929 0.437542
1452 1 0.252949 0.684086 0.429663
1482 1 0.317545 0.802525 0.377749
1481 1 0.247063 0.746369 0.378159
1479 1 0.187372 0.73618 0.437359
1483 1 0.311614 0.749258 0.443591
1450 1 0.304705 0.679851 0.371667
1478 1 0.179555 0.798902 0.383845
1297 1 0.497809 0.500808 0.240999
1534 1 0.949825 0.944416 0.378567
1507 1 0.068096 0.880509 0.426625
1456 1 0.377173 0.69294 0.445917
1454 1 0.434328 0.678464 0.384414
1485 1 0.374005 0.726527 0.377668
1486 1 0.436721 0.801911 0.360903
1487 1 0.440483 0.748544 0.442983
1488 1 0.384652 0.801439 0.422989
1460 1 0.504099 0.687529 0.446584
1492 1 0.497055 0.815032 0.421832
1521 1 0.502428 0.879231 0.370169
1489 1 0.494012 0.739515 0.376803
1491 1 0.561769 0.754256 0.426809
1464 1 0.616987 0.680419 0.429702
1496 1 0.621169 0.814891 0.430857
1490 1 0.565071 0.808268 0.366689
1458 1 0.556428 0.680281 0.373598
1493 1 0.630485 0.75092 0.376642
1526 1 0.685538 0.940053 0.37344
1527 1 0.690616 0.863922 0.433177
1462 1 0.695422 0.679148 0.374252
1495 1 0.69057 0.756665 0.432418
1494 1 0.69614 0.800915 0.368994
1497 1 0.752612 0.748243 0.37851
1466 1 0.81696 0.692955 0.37424
1468 1 0.752415 0.684987 0.435497
1498 1 0.825884 0.804665 0.383675
1499 1 0.812949 0.748268 0.434662
1500 1 0.754718 0.809002 0.427472
1508 1 1.00222 0.93214 0.443844
1531 1 0.838776 0.874303 0.441805
1476 1 0.000856156 0.827275 0.433421
1473 1 0.998605 0.763281 0.379959
1503 1 0.945635 0.752704 0.441636
1504 1 0.884979 0.801329 0.453017
1470 1 0.943994 0.69155 0.377458
1501 1 0.876089 0.753669 0.378416
1472 1 0.885302 0.685537 0.434468
1502 1 0.942837 0.817203 0.374036
1155 1 0.0684256 0.500601 0.17778
1512 1 0.128278 0.949063 0.429777
1506 1 0.0620351 0.943838 0.380049
1509 1 0.121233 0.874431 0.363241
1159 1 0.192719 0.502829 0.180722
159 1 0.940083 0.998896 0.191973
1078 1 0.688184 0.675016 0.00145789
1287 1 0.180811 0.501548 0.32047
1086 1 0.941446 0.691557 0.00809947
1062 1 0.183397 0.702042 0.00587774
1435 1 0.799277 0.508407 0.451298
1129 1 0.240927 0.876132 0.00392808
267 1 0.313198 0.988181 0.31507
1101 1 0.367541 0.760817 0.0049146
397 1 0.380415 0.989645 0.379608
1054 1 0.950118 0.573178 0.00821139
1146 1 0.814166 0.934567 0.00810057
395 1 0.312314 0.994487 0.433568
1098 1 0.320981 0.822353 0.00180813
1425 1 0.49833 0.508944 0.362397
1118 1 0.939887 0.818994 -0.000637169
1145 1 0.7596 0.8725 0.00391901
1051 1 0.821815 0.508287 0.0551497
141 1 0.376337 0.993587 0.127887
145 1 0.497106 0.991163 0.121556
1610 1 0.306183 0.807197 0.493576
147 1 0.570846 0.993082 0.173029
1113 1 0.753949 0.751546 0.00926469
1637 1 0.114624 0.890239 0.486876
1581 1 0.371533 0.614468 0.485541
1429 1 0.636688 0.514651 0.37301
1626 1 0.808655 0.817852 0.493836
153 1 0.757336 0.99586 0.118092
1125 1 0.122904 0.868934 0.00765674
1638 1 0.183489 0.927924 0.492131
1181 1 0.887888 0.505015 0.119548
1031 1 0.183796 0.501276 0.0593412
1650 1 0.552711 0.933376 0.495678
1649 1 0.495795 0.879945 0.495413
1578 1 0.31168 0.677296 0.497131
1142 1 0.682859 0.933968 0.00299223
1617 1 0.500274 0.750047 0.496298
1662 1 0.932927 0.935847 0.49877
1041 1 0.494601 0.506405 0.00870656
1646 1 0.433205 0.943392 0.497395
1546 1 0.31653 0.55352 0.493133
1053 1 0.887049 0.520261 0.00890926
1554 1 0.562882 0.566209 0.496012
1066 1 0.306269 0.685622 0.00766841
1613 1 0.377064 0.763467 0.495608
1657 1 0.754245 0.869688 0.493538
1544 1 0.121665 0.545454 0.571509
1571 1 0.063624 0.618332 0.571809
1666 1 0.0628063 0.559336 0.631791
1701 1 0.12836 0.620363 0.621311
1815 1 0.690821 0.502217 0.805573
655 1 0.438788 0.99353 0.696012
1677 1 0.372802 0.505904 0.615743
1670 1 0.175156 0.55122 0.633289
1548 1 0.249473 0.562585 0.55321
1575 1 0.187504 0.618762 0.565768
1579 1 0.315511 0.615202 0.560737
1674 1 0.306723 0.555726 0.610175
1705 1 0.242289 0.597475 0.629283
1634 1 0.0615921 0.941919 0.5009
1642 1 0.323966 0.931165 0.508913
1552 1 0.376024 0.560946 0.558733
1583 1 0.435314 0.62624 0.563682
1678 1 0.428519 0.557057 0.630561
1709 1 0.376942 0.615762 0.617361
1093 1 0.124977 0.757322 0.990251
1094 1 0.183528 0.816186 0.994199
907 1 0.310108 0.999604 0.939133
1681 1 0.497049 0.510205 0.615667
1126 1 0.180284 0.934612 0.999037
1713 1 0.496748 0.624594 0.625214
1556 1 0.495687 0.566301 0.550633
1560 1 0.612712 0.57473 0.562055
1587 1 0.548683 0.63202 0.569533
1682 1 0.551243 0.569872 0.618884
1717 1 0.622074 0.623607 0.61942
1542 1 0.179123 0.569012 0.499036
1945 1 0.748661 0.503078 0.872272
1049 1 0.754762 0.511397 0.998387
1686 1 0.674149 0.556437 0.628631
1591 1 0.67083 0.625669 0.558122
1564 1 0.742824 0.551138 0.568639
1595 1 0.807555 0.631617 0.568772
1690 1 0.803111 0.565823 0.642341
1721 1 0.738181 0.621418 0.620602
1089 1 0.99698 0.750824 0.988435
785 1 0.501597 0.99317 0.766311
1540 1 0.991709 0.541316 0.572975
1697 1 0.003793 0.630757 0.624785
1568 1 0.886957 0.545359 0.572802
1599 1 0.929644 0.627991 0.582269
1694 1 0.929944 0.560461 0.637449
1725 1 0.867096 0.628719 0.628218
1594 1 0.810791 0.683801 0.503461
1597 1 0.880726 0.623946 0.506215
1576 1 0.122725 0.681758 0.562856
1603 1 0.0666914 0.743317 0.56425
1608 1 0.115876 0.833292 0.559628
1698 1 0.0714704 0.688873 0.630708
1730 1 0.0740393 0.813872 0.633165
1733 1 0.132838 0.745556 0.622476
1572 1 -0.000556281 0.68961 0.562839
1604 1 0.017896 0.826916 0.57073
1563 1 0.804538 0.500098 0.573575
1706 1 0.310297 0.663052 0.620551
1580 1 0.244078 0.668006 0.565804
1734 1 0.182354 0.816014 0.613306
1607 1 0.179102 0.764018 0.55542
1611 1 0.301484 0.750436 0.557633
1702 1 0.178468 0.679754 0.612239
1737 1 0.242568 0.75622 0.616045
1612 1 0.240354 0.823968 0.559697
1738 1 0.306877 0.810414 0.617815
21 1 0.616387 0.999011 1.00138
1939 1 0.558736 0.514593 0.921923
783 1 0.437036 0.997417 0.813286
1710 1 0.432011 0.683906 0.61647
1584 1 0.366267 0.678929 0.56088
1615 1 0.437671 0.759069 0.558858
1616 1 0.374146 0.815283 0.566781
1741 1 0.370327 0.746319 0.609901
1742 1 0.426676 0.812449 0.639123
1620 1 0.496526 0.82184 0.57332
1745 1 0.477117 0.7501 0.624268
1138 1 0.561235 0.930025 0.985825
1803 1 0.314777 0.506294 0.814359
1558 1 0.673205 0.55983 0.500578
1588 1 0.494905 0.695021 0.565324
1714 1 0.555178 0.693374 0.623648
1592 1 0.617948 0.698786 0.57071
1619 1 0.55472 0.748431 0.569215
1624 1 0.613347 0.810649 0.560916
1749 1 0.615425 0.752991 0.624358
1746 1 0.565519 0.817885 0.625453
1933 1 0.372707 0.518584 0.87092
1750 1 0.672646 0.817925 0.621548
1623 1 0.691297 0.752131 0.560673
1722 1 0.813058 0.694625 0.619634
1596 1 0.744836 0.679668 0.557279
1627 1 0.807008 0.753478 0.560398
1718 1 0.686482 0.691529 0.611673
1753 1 0.747623 0.743544 0.627421
1754 1 0.800194 0.819207 0.631896
1628 1 0.746602 0.817897 0.57384
1077 1 0.611278 0.621463 0.983004
1150 1 0.94327 0.940193 0.994561
1729 1 0.00888984 0.751953 0.618258
1600 1 0.875497 0.689167 0.571216
1631 1 0.941973 0.752289 0.564112
1632 1 0.881114 0.801619 0.565994
1726 1 0.950103 0.694073 0.629755
1757 1 0.866146 0.758128 0.62659
1758 1 0.94724 0.810832 0.634767
1046 1 0.683533 0.572403 0.989215
1805 1 0.357523 0.516155 0.741926
1117 1 0.878328 0.74766 1.0015
1635 1 0.0649191 0.899693 0.582867
1640 1 0.12668 0.940492 0.561636
1762 1 0.0629821 0.960842 0.635923
1765 1 0.134139 0.88912 0.625939
1636 1 1.0014 0.957494 0.566894
1585 1 0.492549 0.63826 0.509699
791 1 0.68283 0.984469 0.808128
519 1 0.194853 0.993809 0.566386
1065 1 0.247686 0.617587 0.99621
1030 1 0.175597 0.565255 0.979984
1811 1 0.554248 0.508686 0.819025
1133 1 0.38484 0.874308 0.998213
915 1 0.561452 0.994287 0.937055
1658 1 0.823704 0.934253 0.506845
1639 1 0.186577 0.885971 0.564005
1643 1 0.310415 0.876874 0.569834
1644 1 0.257177 0.941846 0.553909
1766 1 0.195955 0.944 0.628182
1769 1 0.248908 0.885643 0.621869
1770 1 0.306919 0.953591 0.629603
1589 1 0.615543 0.643728 0.506151
1821 1 0.861789 0.50271 0.75528
1645 1 0.388153 0.870794 0.512407
1691 1 0.807681 0.500714 0.689278
1807 1 0.416881 0.511742 0.805746
1647 1 0.436124 0.882545 0.580881
1648 1 0.373548 0.936779 0.571827
1773 1 0.375007 0.875972 0.635365
1774 1 0.4536 0.947215 0.626206
1777 1 0.507019 0.878509 0.629222
1652 1 0.508989 0.944656 0.563082
2047 1 0.933044 0.880949 0.942604
1797 1 0.148613 0.503145 0.748645
1651 1 0.564103 0.886411 0.559446
1656 1 0.630169 0.937883 0.551064
1778 1 0.556063 0.942109 0.630978
1781 1 0.61183 0.882144 0.625104
1931 1 0.317739 0.50168 0.937694
1629 1 0.873727 0.740938 0.50593
909 1 0.379052 0.998461 0.877808
2033 1 0.501867 0.873195 0.875425
1614 1 0.444948 0.811565 0.50599
1813 1 0.612454 0.503909 0.758155
2048 1 0.876905 0.93835 0.934652
1655 1 0.684886 0.874827 0.563533
1659 1 0.814761 0.876564 0.57863
1660 1 0.757306 0.938 0.573524
1782 1 0.690981 0.932633 0.615783
1785 1 0.740195 0.869027 0.627092
1786 1 0.816008 0.942039 0.626337
1573 1 0.119821 0.624897 0.50037
1633 1 0.00912669 0.88797 0.514299
2046 1 0.937101 0.937738 0.881597
1761 1 1.0029 0.879276 0.634081
1951 1 0.951043 0.516274 0.945131
1663 1 0.941191 0.877032 0.565033
1664 1 0.874973 0.943519 0.564538
1789 1 0.881217 0.867339 0.63319
1790 1 0.937429 0.947413 0.633374
1555 1 0.572591 0.508618 0.551433
2040 1 0.627957 0.936811 0.94068
1672 1 0.119637 0.554987 0.687893
1699 1 0.0581429 0.626361 0.696799
1794 1 0.080009 0.555556 0.754465
1800 1 0.147181 0.568221 0.809946
1829 1 0.13552 0.623647 0.74892
1827 1 0.0751359 0.629021 0.815346
1825 1 0.00157492 0.636229 0.756443
1796 1 0.0158056 0.567817 0.805539
1061 1 0.134893 0.638618 0.993651
2045 1 0.870148 0.878354 0.878613
1798 1 0.203637 0.566856 0.74595
1676 1 0.261028 0.54149 0.689414
1703 1 0.184809 0.626513 0.681924
1707 1 0.297081 0.624235 0.694102
1804 1 0.245083 0.566286 0.820387
1831 1 0.186515 0.641902 0.810338
1833 1 0.248717 0.643077 0.744924
1835 1 0.314263 0.643619 0.803904
1802 1 0.296203 0.56948 0.760716
1037 1 0.371187 0.50808 0.999697
1680 1 0.358277 0.562758 0.681386
1711 1 0.4244 0.630437 0.688388
1806 1 0.441943 0.567119 0.750873
1837 1 0.372586 0.627269 0.7472
1839 1 0.425275 0.636898 0.805457
1808 1 0.367425 0.576069 0.811697
1841 1 0.480895 0.633662 0.750275
1683 1 0.546489 0.506003 0.7083
775 1 0.20266 0.998955 0.820421
1715 1 0.554867 0.637874 0.675661
1812 1 0.497689 0.576028 0.810013
1684 1 0.506732 0.569881 0.67761
1688 1 0.611941 0.569973 0.678944
1810 1 0.552785 0.56423 0.753747
1816 1 0.628294 0.568463 0.803914
1843 1 0.552912 0.635985 0.805428
1845 1 0.619726 0.630941 0.740154
1105 1 0.503725 0.754895 0.985465
1034 1 0.315945 0.556551 0.998275
2015 1 0.929768 0.753955 0.934815
1692 1 0.738943 0.56129 0.689843
1719 1 0.679963 0.628995 0.677634
1814 1 0.67673 0.55928 0.737495
1820 1 0.740003 0.564543 0.792425
1849 1 0.746811 0.623294 0.737583
1851 1 0.821873 0.641883 0.807069
1818 1 0.813118 0.570486 0.758593
1723 1 0.808573 0.629856 0.679278
1847 1 0.679106 0.630924 0.796491
1622 1 0.694872 0.816913 0.501387
2026 1 0.321069 0.933684 0.874354
1668 1 0.00161641 0.550264 0.691927
1696 1 0.867747 0.568443 0.692261
1727 1 0.936716 0.620347 0.692061
1822 1 0.937906 0.554054 0.754391
1824 1 0.878493 0.563723 0.806043
1853 1 0.870993 0.620269 0.751233
1855 1 0.932516 0.629236 0.798814
1921 1 0.985802 0.508446 0.869208
2032 1 0.381012 0.936744 0.937401
1826 1 0.0807973 0.68209 0.748804
1704 1 0.12443 0.677993 0.680486
1731 1 0.070339 0.755927 0.681903
1736 1 0.131592 0.818875 0.688611
1859 1 0.0641793 0.75404 0.815074
1861 1 0.111521 0.760471 0.744401
1864 1 0.129457 0.818756 0.807558
1832 1 0.128859 0.691221 0.807491
1858 1 0.0581076 0.822646 0.759003
1732 1 0.0156928 0.815219 0.684325
1700 1 0.0123007 0.702903 0.701072
1860 1 0.00329759 0.827669 0.812835
1834 1 0.323 0.688258 0.742466
1739 1 0.302031 0.746807 0.680102
1830 1 0.184109 0.692155 0.747925
1740 1 0.242392 0.811399 0.670195
1708 1 0.247609 0.682811 0.679502
1735 1 0.181548 0.753772 0.686433
1836 1 0.265717 0.698965 0.810633
1865 1 0.247954 0.7437 0.741667
1868 1 0.24032 0.820014 0.808097
1863 1 0.194217 0.752434 0.821037
1867 1 0.314991 0.766759 0.822679
1866 1 0.295705 0.804461 0.740179
1862 1 0.189481 0.813175 0.74149
1840 1 0.371803 0.707571 0.803478
1744 1 0.356219 0.809092 0.683614
1712 1 0.371809 0.690161 0.677282
1743 1 0.420897 0.752948 0.684708
1838 1 0.43067 0.69662 0.749595
1869 1 0.367827 0.761744 0.748145
1870 1 0.444586 0.812807 0.741914
1871 1 0.438308 0.760911 0.806357
1872 1 0.374519 0.82 0.812243
1844 1 0.49573 0.686809 0.81329
1873 1 0.494444 0.744388 0.74629
1716 1 0.485372 0.683785 0.68533
1748 1 0.493662 0.812513 0.67854
1876 1 0.500661 0.81425 0.816415
1720 1 0.622297 0.681038 0.674616
1842 1 0.55194 0.687401 0.738406
1747 1 0.55161 0.757404 0.670048
1752 1 0.621861 0.810448 0.685259
1877 1 0.617308 0.749257 0.730547
1848 1 0.612785 0.690826 0.799551
1880 1 0.625012 0.804769 0.797951
1875 1 0.551878 0.744538 0.808005
1874 1 0.553533 0.805319 0.740525
1751 1 0.674198 0.753033 0.677584
1878 1 0.684679 0.818089 0.740569
1724 1 0.75231 0.679197 0.676579
1846 1 0.675428 0.689974 0.738277
1850 1 0.822801 0.687768 0.734407
1852 1 0.744784 0.681451 0.793088
1881 1 0.754105 0.738564 0.742288
1882 1 0.808592 0.811774 0.756821
1884 1 0.739069 0.812507 0.81132
1879 1 0.687611 0.744017 0.799345
1755 1 0.805647 0.757348 0.686294
1883 1 0.806278 0.746975 0.807062
1756 1 0.741653 0.801849 0.681033
1828 1 0.990302 0.700004 0.808206
1857 1 0.00445425 0.763245 0.753641
1728 1 0.877122 0.695511 0.674022
1759 1 0.939167 0.764761 0.700511
1854 1 0.935599 0.689576 0.742005
1856 1 0.883202 0.696759 0.797015
1885 1 0.870273 0.760598 0.756718
1887 1 0.941038 0.765099 0.80757
1886 1 0.942224 0.833795 0.746646
1760 1 0.873044 0.819252 0.695674
1888 1 0.873734 0.824906 0.808311
1763 1 0.0669978 0.89104 0.680456
1768 1 0.13516 0.948558 0.683183
1890 1 0.0719176 0.945566 0.760274
1891 1 0.0738221 0.888538 0.81695
1893 1 0.130007 0.880443 0.746897
1896 1 0.138871 0.942428 0.809177
1 1 -0.000345354 1.00054 0.985762
1889 1 0.991597 0.890052 0.76296
1892 1 0.991231 0.954422 0.817775
1081 1 0.752757 0.618298 0.985083
1561 1 0.726648 0.499815 0.508004
1767 1 0.191559 0.87958 0.686637
1772 1 0.251286 0.937505 0.686555
1895 1 0.187436 0.884019 0.827287
1894 1 0.203743 0.943216 0.759878
1897 1 0.237257 0.875193 0.748985
1898 1 0.310414 0.928991 0.740973
1900 1 0.260555 0.936575 0.816079
1771 1 0.301876 0.876917 0.685035
1899 1 0.311587 0.870488 0.806565
1693 1 0.876449 0.502379 0.643031
911 1 0.445789 0.990647 0.932933
1819 1 0.805275 0.502167 0.816109
1775 1 0.439457 0.881571 0.684573
1776 1 0.376379 0.940809 0.674989
1901 1 0.373089 0.869296 0.747204
1902 1 0.430301 0.929905 0.75138
1903 1 0.437285 0.871019 0.81659
1904 1 0.36072 0.934372 0.80388
901 1 0.139686 0.997202 0.876433
1908 1 0.504662 0.933637 0.819032
1905 1 0.500396 0.872981 0.748976
1935 1 0.433193 0.520077 0.943852
1780 1 0.500135 0.936322 0.698187
1779 1 0.563316 0.87341 0.686774
1784 1 0.611428 0.947133 0.683542
1906 1 0.560208 0.934531 0.746375
1907 1 0.558455 0.883478 0.816587
1909 1 0.61905 0.866614 0.744557
1912 1 0.622407 0.938304 0.803008
2013 1 0.866956 0.760866 0.871368
1783 1 0.671506 0.879808 0.676346
1787 1 0.807913 0.875665 0.690035
1911 1 0.688684 0.884837 0.81583
1788 1 0.747427 0.941708 0.685399
1910 1 0.684739 0.928874 0.732162
1913 1 0.740244 0.870822 0.759029
1914 1 0.813713 0.944882 0.747207
1916 1 0.757257 0.941182 0.800066
1915 1 0.81269 0.887004 0.817763
2041 1 0.741601 0.874401 0.877129
2016 1 0.865348 0.817349 0.937515
2031 1 0.440344 0.868326 0.94242
1569 1 0.977567 0.624767 0.500665
1764 1 0.00324025 0.944907 0.702638
1792 1 0.875862 0.942925 0.683502
1920 1 0.869625 0.946275 0.820721
1791 1 0.942606 0.886619 0.688496
1917 1 0.867943 0.889593 0.75236
1918 1 0.933364 0.961517 0.759499
1919 1 0.935364 0.875109 0.817042
1809 1 0.491035 0.505618 0.770446
1922 1 0.0790721 0.545239 0.865939
1928 1 0.116821 0.577712 0.929277
1955 1 0.0637864 0.641881 0.935774
1957 1 0.129389 0.638529 0.883045
2017 1 1.00055 0.872175 0.88189
2036 1 0.498694 0.929636 0.940222
1926 1 0.182419 0.564945 0.883415
1930 1 0.309444 0.574807 0.878552
1963 1 0.3071 0.638292 0.93652
1961 1 0.253971 0.639924 0.88063
1932 1 0.245202 0.557673 0.937703
1959 1 0.195667 0.630349 0.935184
2019 1 0.0682419 0.882473 0.947645
1982 1 0.93191 0.710264 0.86941
2029 1 0.379551 0.88448 0.873863
2038 1 0.708696 0.945125 0.889332
2024 1 0.12496 0.937131 0.937305
1967 1 0.432948 0.636867 0.9414
1965 1 0.370028 0.636361 0.87195
1934 1 0.428661 0.573256 0.872295
1936 1 0.370598 0.58749 0.937681
2043 1 0.811456 0.866277 0.942757
1985 1 0.0123405 0.763055 0.878259
1582 1 0.439708 0.701636 0.505918
2039 1 0.679121 0.871829 0.94815
1940 1 0.488593 0.57206 0.92654
1969 1 0.494148 0.6276 0.872205
1938 1 0.56884 0.574775 0.865186
1944 1 0.621435 0.565212 0.928118
1971 1 0.564237 0.631207 0.920439
1973 1 0.62985 0.64269 0.858756
1562 1 0.815932 0.561768 0.51757
2042 1 0.801806 0.940674 0.880897
2035 1 0.564937 0.868679 0.937368
1975 1 0.69673 0.631164 0.922824
1942 1 0.680087 0.570382 0.868936
1946 1 0.82499 0.582323 0.870036
1977 1 0.758608 0.615841 0.849923
1948 1 0.759872 0.565802 0.923609
1979 1 0.827124 0.626626 0.943473
2044 1 0.764585 0.930941 0.943411
2018 1 0.049107 0.938726 0.875643
1026 1 0.0626372 0.576169 0.99856
2028 1 0.265951 0.935618 0.938025
2030 1 0.442121 0.933748 0.868458
1661 1 0.873983 0.864455 0.521039
1953 1 0.999242 0.63398 0.862671
1924 1 0.02005 0.579924 0.912463
1950 1 0.940409 0.577627 0.866312
1952 1 0.876156 0.568137 0.937317
1983 1 0.932122 0.630111 0.939589
1981 1 0.884409 0.640752 0.863058
2020 1 0.999303 0.939469 0.937781
2023 1 0.190205 0.857188 0.934394
1992 1 0.119834 0.818289 0.94048
1987 1 0.0650098 0.729369 0.928843
1960 1 0.13573 0.692807 0.93785
1989 1 0.12192 0.744342 0.877863
1986 1 0.066467 0.818692 0.872371
1954 1 0.0559901 0.688495 0.863822
1956 1 0.992859 0.6971 0.925681
1988 1 0.00178636 0.824759 0.947454
2037 1 0.624427 0.86563 0.853467
777 1 0.260067 1.00014 0.755069
2025 1 0.250939 0.875095 0.866972
1570 1 0.0520063 0.674491 0.501167
527 1 0.441692 0.997477 0.562529
1995 1 0.320197 0.764859 0.933165
1962 1 0.313569 0.698813 0.882665
1990 1 0.176067 0.810609 0.869406
1958 1 0.206869 0.699756 0.87587
1993 1 0.255758 0.761332 0.870087
1964 1 0.252452 0.694758 0.94587
1996 1 0.261869 0.813717 0.934664
1991 1 0.192274 0.759845 0.930855
1994 1 0.321282 0.82215 0.871782
2022 1 0.202296 0.941567 0.888517
913 1 0.509303 0.991335 0.87085
1042 1 0.549569 0.566349 0.981552
1984 1 0.869021 0.693825 0.941216
1968 1 0.365199 0.69541 0.949182
1966 1 0.417276 0.695439 0.87307
2000 1 0.38096 0.819375 0.94223
1997 1 0.374362 0.756851 0.87359
1998 1 0.442971 0.80958 0.882953
1999 1 0.434817 0.749915 0.947828
2004 1 0.506351 0.813986 0.931828
1972 1 0.498017 0.683914 0.932271
2001 1 0.491942 0.748281 0.87934
2014 1 0.940761 0.81614 0.882824
2034 1 0.574135 0.929194 0.880377
1970 1 0.564546 0.689732 0.867841
1976 1 0.627102 0.68574 0.933978
2003 1 0.559997 0.761141 0.931595
2008 1 0.623244 0.813435 0.928187
2005 1 0.640758 0.737378 0.871889
2002 1 0.566999 0.808699 0.868313
1679 1 0.424506 0.501989 0.693354
2027 1 0.324966 0.872924 0.940011
1538 1 0.0535105 0.548949 0.509707
2009 1 0.756649 0.750367 0.870096
2006 1 0.696111 0.80216 0.885883
2011 1 0.808869 0.764741 0.937588
2010 1 0.811126 0.821923 0.865416
1974 1 0.706618 0.683419 0.862927
1978 1 0.813892 0.695866 0.886152
2007 1 0.689794 0.744755 0.94433
2012 1 0.74312 0.815516 0.948319
1980 1 0.755776 0.698414 0.94192
2021 1 0.121848 0.877079 0.877819
1559 1 0.666917 0.503741 0.56892
905 1 0.257391 0.997637 0.876499
1923 1 0.070895 0.512034 0.939968
1943 1 0.679428 0.508224 0.932538
779 1 0.310697 0.998645 0.808374
771 1 0.0814049 0.997028 0.819121
903 1 0.197266 0.996449 0.940246
1574 1 0.188148 0.687326 0.50333
1141 1 0.619536 0.867238 0.989806
1949 1 0.877631 0.520167 0.87078
1937 1 0.489419 0.508329 0.867968
1122 1 0.0655199 0.94057 1.00274
17 1 0.487396 0.997212 0.992121
1630 1 0.957182 0.813431 0.508595
1566 1 0.935028 0.548432 0.509281
1045 1 0.620439 0.513331 0.984104
1074 1 0.54883 0.693369 0.996351
1134 1 0.4448 0.934961 0.994347
1097 1 0.263014 0.759057 0.996555
1109 1 0.614229 0.739445 0.990976
521 1 0.237527 0.998982 0.504056
1606 1 0.175535 0.825284 0.505947
1073 1 0.496369 0.621679 0.997938
1609 1 0.238777 0.741904 0.503833
1149 1 0.876436 0.870862 0.995722
1082 1 0.819471 0.686564 0.996925
1593 1 0.738269 0.615687 0.512377
1130 1 0.315631 0.938343 0.996257
1654 1 0.690808 0.94745 0.503011
|
[
"ITEM: TIMESTEP\n1000\nITEM: NUMBER OF ATOMS\n2048\nITEM: BOX BOUNDS pp pp pp\n-2.7012907437591949e-01 4.7470129074369581e+01\n-2.7012907437591949e-01 4.7470129074369581e+01\n-2.7012907437591949e-01 4.7470129074369581e+01\nITEM: ATOMS id type xs ys zs\n8 1 0.118514 0.0599866 0.0631161\n35 1 0.0692648 0.12677 0.0619928\n130 1 0.0703103 0.0649093 0.126251\n165 1 0.138603 0.117881 0.127473\n155 1 0.802452 -0.00225189 0.190937\n279 1 0.682405 -0.00102136 0.320418\n85 1 0.617155 0.249443 0.014004\n134 1 0.190224 0.042665 0.122036\n12 1 0.248376 0.0662609 0.0678188\n39 1 0.189758 0.118437 0.0604761\n43 1 0.30201 0.122494 0.054495\n138 1 0.318919 0.0606008 0.121184\n169 1 0.25401 0.123982 0.124703\n618 1 0.314046 0.423935 0.498843\n157 1 0.882592 0.0144295 0.135182\n1167 1 0.446654 0.498932 0.178384\n1439 1 0.930856 0.489936 0.429996\n16 1 0.377532 0.0788858 0.0614955\n47 1 0.443232 0.119355 0.0599885\n142 1 0.437491 0.0522965 0.123421\n173 1 0.381328 0.126711 0.132362\n177 1 0.498436 0.123563 0.127073\n594 1 0.571856 0.317841 0.497458\n1183 1 0.939923 0.497092 0.177559\n1415 1 0.183325 0.497855 0.452079\n50 1 0.559938 0.181833 -0.000667064\n20 1 0.504471 0.0635768 0.0603335\n24 1 0.626716 0.0534061 0.0583679\n51 1 0.567867 0.120083 0.0614\n146 1 0.550245 0.0584699 0.124254\n181 1 0.634136 0.115482 0.116651\n407 1 0.686788 0.00162551 0.449686\n49 1 0.497971 0.119963 0.00499224\n150 1 0.698101 0.046347 0.110991\n28 1 0.755827 0.0629804 0.060971\n55 1 0.690196 0.125129 0.0563922\n59 1 0.814925 0.123202 0.0486223\n154 1 0.814239 0.0472368 0.1162\n185 1 0.748493 0.115375 0.124793\n389 1 0.131156 0.00768717 0.374367\n122 1 0.813826 0.45016 -0.00226142\n4 1 0.00571049 0.0642641 0.0516869\n161 1 0.0145263 0.120345 0.125595\n32 1 0.882638 0.0641087 0.0624529\n63 1 0.944441 0.133281 0.0668484\n158 1 0.942773 0.0621624 0.121663\n189 1 0.884011 0.12647 0.120856\n31 1 0.949647 0.00451724 0.0592671\n40 1 0.126322 0.180487 0.0591702\n67 1 0.076321 0.256897 0.0481963\n162 1 0.0737709 0.18292 0.125443\n197 1 0.124175 0.243848 0.132161\n72 1 0.128136 0.314366 0.0799744\n194 1 0.068254 0.291934 0.117875\n193 1 0.00924748 0.233044 0.121644\n36 1 0.0115579 0.187866 0.0596469\n1437 1 0.863518 0.486486 0.384824\n411 1 0.807086 0.0047618 0.438232\n391 1 0.187557 0.0128456 0.441716\n166 1 0.186668 0.177231 0.122783\n198 1 0.178173 0.312545 0.144773\n170 1 0.317746 0.169357 0.138677\n44 1 0.246056 0.188666 0.0715916\n71 1 0.189382 0.250992 0.0744894\n201 1 0.238849 0.241488 0.132444\n75 1 0.301765 0.250298 0.0782923\n76 1 0.239888 0.308852 0.0584296\n202 1 0.293209 0.307263 0.137376\n606 1 0.942553 0.303178 0.49923\n401 1 0.496693 0.000873129 0.37682\n102 1 0.182723 0.436664 -0.000478082\n1427 1 0.570435 0.500057 0.429309\n48 1 0.393356 0.190678 0.0599849\n174 1 0.437689 0.18471 0.139591\n79 1 0.440983 0.255103 0.0581657\n205 1 0.365623 0.249313 0.130058\n80 1 0.379197 0.303608 0.0704467\n206 1 0.432488 0.323907 0.127045\n621 1 0.373617 0.367529 0.49599\n14 1 0.43862 0.0587564 0.00627447\n52 1 0.484423 0.188543 0.0665317\n84 1 0.500018 0.31934 0.07723\n209 1 0.488284 0.254011 0.128159\n178 1 0.546072 0.185375 0.136579\n56 1 0.613229 0.185539 0.0768676\n83 1 0.545828 0.242915 0.0664197\n88 1 0.611037 0.319933 0.0619186\n210 1 0.555776 0.302342 0.129695\n213 1 0.622974 0.244927 0.125081\n186 1 0.802287 0.173743 0.124388\n182 1 0.69117 0.179418 0.113282\n60 1 0.75244 0.195119 0.0569863\n87 1 0.691608 0.250977 0.0559722\n217 1 0.754972 0.236141 0.125877\n218 1 0.823524 0.299716 0.118129\n92 1 0.760961 0.313528 0.0687033\n91 
1 0.82084 0.237089 0.0685305\n214 1 0.686067 0.313695 0.116799\n1039 1 0.445446 0.49796 0.0697614\n1173 1 0.627414 0.495995 0.116266\n283 1 0.820211 0.00151803 0.318055\n74 1 0.323208 0.294976 0.00990392\n68 1 0.00301754 0.313786 0.0554314\n190 1 0.950554 0.184051 0.13353\n64 1 0.874802 0.186404 0.0518447\n95 1 0.920281 0.251951 0.0571724\n221 1 0.875537 0.220154 0.135128\n222 1 0.95689 0.308093 0.121029\n96 1 0.873156 0.312464 0.0536935\n1177 1 0.753974 0.500885 0.119529\n546 1 0.0630301 0.184789 0.495837\n99 1 0.0687252 0.384531 0.0673004\n104 1 0.136695 0.440047 0.0634071\n226 1 0.0742081 0.450335 0.121242\n229 1 0.126825 0.375727 0.135456\n19 1 0.548714 0.00136653 0.058336\n265 1 0.250521 0.00638232 0.237985\n399 1 0.442478 0.0051812 0.437462\n65 1 0.013382 0.251309 0.00485621\n143 1 0.43864 0.00191848 0.186309\n103 1 0.195233 0.369267 0.0611842\n108 1 0.251689 0.420277 0.0546987\n230 1 0.189032 0.429653 0.119762\n233 1 0.246148 0.374706 0.125159\n234 1 0.304974 0.434872 0.136514\n107 1 0.304012 0.347258 0.066143\n1305 1 0.755913 0.493287 0.248738\n1283 1 0.0579747 0.488959 0.312582\n111 1 0.41954 0.370177 0.0672681\n112 1 0.354168 0.432917 0.0728413\n237 1 0.366392 0.370227 0.129009\n238 1 0.430399 0.430965 0.133122\n578 1 0.0795431 0.302974 0.492861\n116 1 0.501339 0.432268 0.0580372\n241 1 0.482526 0.378881 0.134142\n115 1 0.566084 0.382841 0.047451\n120 1 0.640461 0.440452 0.0528615\n242 1 0.56158 0.431715 0.110301\n245 1 0.622856 0.376628 0.132713\n38 1 0.190477 0.190911 0.00114873\n566 1 0.677669 0.188694 0.496938\n246 1 0.685867 0.443722 0.117934\n119 1 0.690218 0.369414 0.0595599\n123 1 0.818524 0.387424 0.0536907\n124 1 0.748616 0.445361 0.0507502\n249 1 0.757569 0.377445 0.116976\n250 1 0.811663 0.438016 0.112322\n510 1 0.915903 0.434147 0.36995\n126 1 0.972249 0.449212 0.0017488\n511 1 0.939026 0.368121 0.434918\n1311 1 0.940591 0.48799 0.302905\n23 1 0.691187 0.00148969 0.0509751\n100 1 0.997448 0.449269 0.0774573\n225 1 0.994329 0.376387 0.106942\n127 1 0.929108 0.388978 0.050183\n128 1 0.878847 0.45188 0.0501073\n253 1 0.881601 0.366095 0.123638\n254 1 0.935466 0.436971 0.127853\n9 1 0.265799 0.000498899 0.0113705\n163 1 0.0707224 0.12067 0.185834\n136 1 0.131257 0.0632023 0.193464\n258 1 0.065459 0.0641483 0.244256\n264 1 0.130556 0.0663226 0.310906\n291 1 0.066086 0.128529 0.320081\n293 1 0.126742 0.121291 0.250636\n260 1 0.00938821 0.064928 0.310796\n289 1 0.001251 0.135354 0.26037\n1421 1 0.370749 0.492516 0.371624\n171 1 0.318207 0.106202 0.191346\n262 1 0.200675 0.0637096 0.25129\n167 1 0.192966 0.130268 0.188828\n140 1 0.249489 0.0623364 0.178885\n266 1 0.327336 0.0583692 0.25424\n297 1 0.255416 0.124076 0.251139\n268 1 0.267696 0.049357 0.312791\n299 1 0.313521 0.111967 0.323083\n144 1 0.372563 0.0565053 0.186376\n175 1 0.435031 0.113593 0.19408\n270 1 0.428226 0.0647762 0.253003\n301 1 0.370036 0.119884 0.25734\n303 1 0.437192 0.12239 0.317112\n272 1 0.369799 0.0496622 0.318752\n148 1 0.503398 0.0615788 0.194244\n1179 1 0.8207 0.492422 0.181946\n276 1 0.494555 0.0625772 0.305486\n305 1 0.500672 0.120962 0.246257\n179 1 0.57125 0.120613 0.172074\n152 1 0.630617 0.0560961 0.186879\n274 1 0.566035 0.0738323 0.245885\n309 1 0.633561 0.118334 0.250946\n280 1 0.618156 0.0540839 0.305725\n307 1 0.567833 0.121863 0.308126\n387 1 0.0697478 0.00643595 0.437573\n156 1 0.752557 0.0516617 0.175757\n183 1 0.692954 0.114931 0.177945\n278 1 0.691187 0.0604386 0.239381\n282 1 0.812934 0.0523742 0.259847\n284 1 0.750159 0.0589086 0.308726\n313 1 0.741775 0.12767 0.250088\n187 1 
0.819947 0.110361 0.178322\n315 1 0.797582 0.118836 0.31243\n311 1 0.688368 0.108002 0.310323\n405 1 0.618834 0.00349033 0.374101\n160 1 0.882446 0.0668656 0.199144\n1035 1 0.302622 0.49573 0.0659356\n132 1 0.0103734 0.0683327 0.190101\n191 1 0.943116 0.116028 0.182531\n286 1 0.94532 0.0727851 0.251805\n288 1 0.876924 0.0607833 0.314023\n317 1 0.86857 0.118622 0.264616\n168 1 0.137856 0.183316 0.187528\n195 1 0.0743311 0.235452 0.196622\n200 1 0.109581 0.310019 0.193939\n290 1 0.0721373 0.18299 0.253189\n325 1 0.123716 0.247241 0.262148\n322 1 0.062828 0.311933 0.263019\n323 1 0.068578 0.249774 0.324761\n296 1 0.119282 0.189087 0.333289\n328 1 0.124947 0.317752 0.318061\n196 1 0.0194401 0.292051 0.190091\n164 1 0.0125664 0.183964 0.186864\n172 1 0.251399 0.186957 0.190209\n326 1 0.186542 0.302384 0.247595\n327 1 0.191837 0.251715 0.321374\n199 1 0.188637 0.240024 0.194065\n294 1 0.184304 0.190726 0.254172\n300 1 0.247523 0.180066 0.319473\n329 1 0.243051 0.249727 0.262853\n330 1 0.309259 0.310577 0.259672\n331 1 0.30739 0.247344 0.307115\n332 1 0.24043 0.322178 0.307556\n204 1 0.25269 0.31284 0.201194\n298 1 0.303916 0.183638 0.252862\n203 1 0.309328 0.247044 0.182746\n302 1 0.444861 0.178708 0.256232\n176 1 0.370881 0.186633 0.199857\n207 1 0.427222 0.247481 0.186114\n304 1 0.367232 0.186046 0.30512\n333 1 0.362061 0.256042 0.246677\n334 1 0.44034 0.316548 0.252522\n335 1 0.436936 0.254129 0.297573\n336 1 0.375638 0.306719 0.308759\n208 1 0.374193 0.312995 0.180471\n308 1 0.507375 0.181129 0.311045\n212 1 0.493034 0.310921 0.189424\n180 1 0.500946 0.180957 0.19797\n337 1 0.510236 0.251395 0.243778\n184 1 0.62257 0.178732 0.191661\n211 1 0.574451 0.248376 0.197664\n216 1 0.624436 0.308785 0.182108\n306 1 0.564385 0.174235 0.242509\n312 1 0.638816 0.172643 0.316978\n338 1 0.565728 0.315652 0.252899\n339 1 0.56265 0.2578 0.308184\n341 1 0.622307 0.231887 0.257868\n344 1 0.63008 0.308303 0.312779\n310 1 0.684542 0.183471 0.237051\n340 1 0.515481 0.319836 0.303985\n188 1 0.743699 0.173984 0.185743\n342 1 0.690746 0.313526 0.237522\n343 1 0.687366 0.249605 0.315489\n314 1 0.803843 0.175876 0.251949\n215 1 0.683977 0.243848 0.179279\n345 1 0.740194 0.250409 0.236417\n346 1 0.81016 0.322556 0.240466\n220 1 0.752354 0.302462 0.179514\n219 1 0.805974 0.243593 0.192992\n348 1 0.753464 0.302853 0.298569\n316 1 0.733869 0.17149 0.319092\n347 1 0.806721 0.243829 0.299436\n324 1 0.998384 0.309484 0.314138\n321 1 0.00472738 0.249669 0.263241\n292 1 -3.48524e-05 0.18787 0.317446\n192 1 0.86915 0.189475 0.211274\n223 1 0.950325 0.240918 0.198691\n224 1 0.869733 0.294903 0.184127\n318 1 0.939829 0.185931 0.252912\n320 1 0.870373 0.182399 0.309988\n349 1 0.861931 0.262995 0.258256\n350 1 0.937321 0.291816 0.255468\n352 1 0.873818 0.316685 0.309806\n351 1 0.935468 0.241755 0.313017\n1553 1 0.511316 0.49911 0.496922\n101 1 0.123586 0.382663 0.0058635\n227 1 0.0591683 0.372661 0.185434\n232 1 0.127525 0.439844 0.182481\n354 1 0.0590146 0.430447 0.251999\n355 1 0.0507042 0.380194 0.313365\n357 1 0.115009 0.375403 0.239623\n360 1 0.124379 0.436125 0.306641\n3 1 0.0569214 0.00126409 0.0601337\n1161 1 0.235651 0.496119 0.118679\n45 1 0.373823 0.134806 0.00641269\n231 1 0.183001 0.37841 0.189905\n236 1 0.241633 0.434285 0.189762\n358 1 0.181967 0.439963 0.252226\n361 1 0.267945 0.380422 0.248152\n364 1 0.245792 0.435535 0.306225\n363 1 0.323423 0.372394 0.317974\n362 1 0.307359 0.447638 0.241571\n235 1 0.321659 0.37203 0.193938\n359 1 0.181426 0.381278 0.307363\n514 1 0.0722279 0.065593 0.494081\n622 1 0.439223 
0.426951 0.481953\n240 1 0.367489 0.429214 0.18921\n239 1 0.424896 0.369324 0.187921\n365 1 0.379354 0.368766 0.257408\n366 1 0.436154 0.450428 0.248841\n367 1 0.437607 0.368689 0.315361\n368 1 0.379672 0.431514 0.318504\n244 1 0.509221 0.435704 0.183107\n372 1 0.499926 0.446236 0.304935\n369 1 0.499547 0.384687 0.252067\n243 1 0.552897 0.358223 0.186542\n248 1 0.629755 0.432905 0.188826\n370 1 0.560367 0.435859 0.245465\n371 1 0.568381 0.381061 0.310445\n373 1 0.620329 0.375681 0.24783\n376 1 0.635822 0.463798 0.302935\n509 1 0.86504 0.356649 0.383999\n374 1 0.686696 0.442238 0.24301\n375 1 0.691934 0.375482 0.312211\n247 1 0.691398 0.379844 0.176753\n252 1 0.748887 0.443305 0.176101\n377 1 0.738038 0.378944 0.236258\n380 1 0.747421 0.454819 0.328098\n378 1 0.812566 0.436518 0.244632\n379 1 0.786022 0.379454 0.298561\n251 1 0.815193 0.38675 0.182723\n484 1 0.00266108 0.42695 0.427328\n353 1 0.998238 0.360195 0.241094\n356 1 0.993109 0.430081 0.329548\n228 1 0.0107157 0.435179 0.175066\n255 1 0.941546 0.374804 0.175594\n256 1 0.885752 0.424401 0.189602\n381 1 0.883979 0.356512 0.233483\n382 1 0.957127 0.432555 0.243227\n383 1 0.944983 0.368109 0.311968\n384 1 0.866107 0.406348 0.313546\n386 1 0.0680251 0.068533 0.379664\n392 1 0.13419 0.0689766 0.43098\n419 1 0.0655568 0.127146 0.442593\n421 1 0.127483 0.126004 0.379446\n388 1 -0.000852562 0.0605331 0.438945\n417 1 0.00686336 0.122699 0.38276\n525 1 0.366203 0.00590256 0.497599\n554 1 0.339524 0.194979 0.485974\n598 1 0.701731 0.31107 0.49765\n295 1 0.183162 0.132046 0.31645\n82 1 0.557418 0.311008 0.0067994\n423 1 0.203405 0.122946 0.444606\n390 1 0.201726 0.0619705 0.371967\n394 1 0.317135 0.0455882 0.374177\n396 1 0.260996 0.0541055 0.430731\n425 1 0.241628 0.130419 0.376684\n427 1 0.310506 0.118174 0.431324\n10 1 0.32116 0.0607328 0.000550233\n398 1 0.439534 0.0525346 0.368472\n400 1 0.377848 0.0553987 0.422199\n429 1 0.375383 0.113342 0.367778\n431 1 0.448512 0.101567 0.443121\n630 1 0.691118 0.42488 0.498634\n433 1 0.498581 0.11914 0.367541\n404 1 0.519597 0.0640253 0.436275\n402 1 0.559267 0.0527202 0.358355\n408 1 0.6227 0.0617705 0.442596\n435 1 0.572995 0.123536 0.430971\n437 1 0.622568 0.112706 0.365199\n481 1 0.997341 0.358809 0.382754\n1165 1 0.367818 0.497661 0.140442\n1309 1 0.891404 0.490047 0.243884\n562 1 0.561654 0.181484 0.48601\n1423 1 0.430604 0.490676 0.430924\n406 1 0.682333 0.0544716 0.388791\n412 1 0.754501 0.0730608 0.441638\n439 1 0.680973 0.124113 0.433829\n441 1 0.740139 0.110888 0.372033\n443 1 0.807228 0.13019 0.433752\n410 1 0.797682 0.0560852 0.372701\n257 1 0.00295484 0.00763216 0.249672\n89 1 0.757532 0.263667 0.0066509\n319 1 0.946268 0.118886 0.314702\n589 1 0.396991 0.249207 0.480039\n414 1 0.943247 0.0660529 0.372274\n416 1 0.87479 0.0469134 0.431722\n445 1 0.872408 0.11783 0.360315\n447 1 0.945373 0.129415 0.433991\n601 1 0.749512 0.235626 0.498404\n418 1 0.0553018 0.193968 0.386331\n424 1 0.118797 0.183696 0.431976\n450 1 0.0656293 0.315449 0.381068\n451 1 0.0556372 0.259133 0.433577\n453 1 0.127245 0.247558 0.384759\n456 1 0.132483 0.312607 0.434768\n610 1 0.0496199 0.44117 0.489946\n503 1 0.685967 0.374213 0.431625\n1289 1 0.238966 0.501693 0.251155\n263 1 0.192968 0.0056835 0.300005\n460 1 0.244439 0.306528 0.434564\n428 1 0.255059 0.183217 0.443301\n422 1 0.183996 0.196677 0.375745\n426 1 0.316628 0.180418 0.371229\n454 1 0.192119 0.315028 0.371582\n455 1 0.189518 0.242617 0.454299\n457 1 0.250268 0.243958 0.371817\n458 1 0.298552 0.312051 0.360802\n459 1 0.308851 0.250712 0.428288\n1285 1 
0.116481 0.491998 0.255369\n506 1 0.812904 0.430023 0.370899\n1417 1 0.249112 0.485407 0.377114\n430 1 0.441701 0.199704 0.360128\n432 1 0.389032 0.182223 0.425645\n461 1 0.370803 0.245054 0.37149\n462 1 0.454141 0.309045 0.373849\n463 1 0.454277 0.246273 0.432995\n464 1 0.372844 0.310179 0.428198\n468 1 0.518381 0.310551 0.445361\n436 1 0.492208 0.174056 0.430224\n465 1 0.513761 0.253854 0.364145\n440 1 0.620933 0.186598 0.437367\n467 1 0.566655 0.245832 0.428722\n469 1 0.621799 0.252399 0.372789\n434 1 0.558688 0.181588 0.374504\n466 1 0.563376 0.319869 0.365647\n472 1 0.620616 0.310887 0.427869\n505 1 0.743021 0.379794 0.382571\n538 1 0.813374 0.0813273 0.497929\n474 1 0.812251 0.288634 0.36542\n471 1 0.691481 0.25158 0.440328\n444 1 0.748957 0.18914 0.44206\n473 1 0.748427 0.239163 0.367052\n442 1 0.816563 0.187735 0.384058\n438 1 0.678334 0.196422 0.382618\n476 1 0.761744 0.31053 0.421663\n470 1 0.676501 0.311353 0.374078\n475 1 0.81564 0.243754 0.44874\n1307 1 0.824716 0.487753 0.303139\n449 1 -7.31866e-05 0.257654 0.379034\n452 1 0.000553588 0.310402 0.442312\n420 1 0.00423474 0.193711 0.437647\n446 1 0.945693 0.185782 0.37554\n480 1 0.879294 0.303477 0.443183\n448 1 0.879126 0.178648 0.446186\n478 1 0.932198 0.302689 0.367797\n479 1 0.934984 0.240049 0.439826\n477 1 0.882491 0.236655 0.372174\n508 1 0.751799 0.438068 0.442169\n1419 1 0.311483 0.502242 0.43804\n488 1 0.139401 0.429364 0.447633\n482 1 0.0748751 0.440477 0.379114\n483 1 0.0609223 0.376964 0.441506\n485 1 0.128904 0.377723 0.372408\n512 1 0.886452 0.421826 0.435743\n507 1 0.813739 0.379195 0.446697\n502 1 0.68172 0.445788 0.359489\n542 1 0.938851 0.0682873 0.488342\n487 1 0.193327 0.373452 0.428016\n490 1 0.324144 0.42509 0.378469\n486 1 0.189241 0.433775 0.377087\n489 1 0.24906 0.385354 0.369934\n492 1 0.242173 0.44208 0.445249\n491 1 0.30298 0.362211 0.435965\n259 1 0.0751918 0.0118994 0.316801\n590 1 0.435694 0.312226 0.498352\n494 1 0.436055 0.433976 0.368849\n495 1 0.462663 0.36695 0.430359\n497 1 0.510472 0.380108 0.36046\n496 1 0.368352 0.427843 0.439115\n493 1 0.38457 0.37103 0.378788\n501 1 0.630501 0.37889 0.365235\n504 1 0.627284 0.440439 0.431882\n500 1 0.51759 0.432052 0.438886\n498 1 0.569345 0.451105 0.364118\n499 1 0.571237 0.372667 0.436921\n626 1 0.566143 0.446515 0.498248\n545 1 0.00376218 0.121892 0.497779\n285 1 0.876943 0.00418521 0.253792\n1157 1 0.139656 0.497046 0.126973\n385 1 0.0133585 0.00778122 0.372984\n1409 1 0.995006 0.491869 0.37633\n110 1 0.421707 0.444431 0.00793448\n57 1 0.75032 0.13106 0.00662346\n593 1 0.5067 0.24472 0.498846\n565 1 0.630598 0.119456 0.498211\n585 1 0.26582 0.241364 0.499759\n25 1 0.753499 0.00893035 0.00308082\n637 1 0.880594 0.360539 0.493989\n558 1 0.439652 0.171778 0.496556\n41 1 0.250472 0.126748 0.00466704\n586 1 0.321745 0.30241 0.492484\n105 1 0.265927 0.358039 0.00199529\n121 1 0.748968 0.377222 0.00675747\n602 1 0.805627 0.308463 0.497316\n617 1 0.246973 0.361643 0.498163\n520 1 0.119484 0.0721829 0.568929\n547 1 0.0567147 0.120422 0.5675\n642 1 0.0605527 0.0658549 0.627865\n677 1 0.126463 0.135005 0.629719\n516 1 -0.000793228 0.0511352 0.567637\n98 1 0.0715222 0.448062 0.988917\n646 1 0.195074 0.0655452 0.639861\n524 1 0.254553 0.0688769 0.568231\n551 1 0.192697 0.130853 0.569312\n555 1 0.315257 0.119302 0.571498\n650 1 0.326255 0.0668198 0.633068\n681 1 0.257499 0.121413 0.62293\n90 1 0.815844 0.30733 0.978163\n528 1 0.379832 0.0697787 0.567435\n559 1 0.444851 0.118971 0.571744\n654 1 0.445887 0.056591 0.631284\n685 1 0.389673 0.122225 0.625994\n689 1 
0.505996 0.116677 0.628854\n532 1 0.501425 0.0619956 0.562994\n536 1 0.623991 0.0552943 0.563254\n563 1 0.565017 0.125033 0.561884\n658 1 0.564152 0.0570042 0.630851\n693 1 0.632805 0.115808 0.625878\n18 1 0.551511 0.0590333 0.99471\n5 1 0.130084 0.00953429 0.998606\n671 1 0.931796 0.00305215 0.684179\n614 1 0.19016 0.42383 0.515139\n37 1 0.142579 0.121907 0.992345\n540 1 0.756679 0.0708244 0.57039\n567 1 0.677666 0.128052 0.55741\n571 1 0.81729 0.125708 0.570236\n662 1 0.691666 0.0608531 0.614085\n666 1 0.810194 0.0663205 0.632979\n697 1 0.747567 0.132373 0.62061\n550 1 0.179995 0.171365 0.501344\n673 1 0.998402 0.11608 0.624756\n544 1 0.873249 0.0706477 0.567657\n575 1 0.940873 0.122891 0.563819\n670 1 0.92993 0.0599448 0.63007\n701 1 0.889081 0.131842 0.623885\n1689 1 0.736807 0.495821 0.636098\n663 1 0.680879 0.00694437 0.683811\n669 1 0.872443 0.00734611 0.626887\n674 1 0.0554049 0.179878 0.624694\n552 1 0.136652 0.184411 0.571996\n579 1 0.0542564 0.239734 0.565753\n584 1 0.113514 0.292945 0.573478\n709 1 0.123614 0.242114 0.638961\n706 1 0.0588879 0.310987 0.633783\n580 1 0.995622 0.308554 0.558266\n682 1 0.320274 0.18103 0.631023\n678 1 0.200872 0.189542 0.624156\n556 1 0.263569 0.181278 0.562752\n583 1 0.197289 0.238821 0.549396\n710 1 0.182885 0.295604 0.621175\n713 1 0.245672 0.248958 0.617187\n714 1 0.332149 0.303865 0.627787\n587 1 0.315891 0.245045 0.565039\n588 1 0.2565 0.29963 0.560426\n577 1 0.00642403 0.246107 0.499577\n522 1 0.309199 0.0694449 0.503449\n686 1 0.445319 0.180423 0.630209\n560 1 0.372853 0.174806 0.561585\n591 1 0.428157 0.237125 0.556301\n592 1 0.376767 0.302067 0.554258\n717 1 0.377223 0.23427 0.624793\n718 1 0.442728 0.295625 0.625064\n721 1 0.502789 0.242329 0.632488\n564 1 0.50846 0.184832 0.554721\n596 1 0.518283 0.306729 0.563472\n568 1 0.620147 0.187872 0.569561\n595 1 0.55531 0.242525 0.57076\n600 1 0.624536 0.31152 0.555745\n690 1 0.558964 0.182211 0.632972\n722 1 0.562203 0.308242 0.633951\n725 1 0.631229 0.251034 0.630584\n1565 1 0.875942 0.479427 0.499035\n33 1 0.99181 0.132856 0.999343\n526 1 0.436278 0.0636613 0.517299\n1547 1 0.321973 0.495016 0.556104\n694 1 0.687564 0.190329 0.626284\n572 1 0.747246 0.196711 0.568958\n599 1 0.684709 0.253102 0.554427\n604 1 0.749972 0.308596 0.561346\n729 1 0.736712 0.256784 0.631543\n726 1 0.676773 0.32416 0.625437\n698 1 0.802341 0.198614 0.638934\n603 1 0.816013 0.251538 0.569909\n730 1 0.811119 0.319315 0.629385\n1947 1 0.827035 0.495085 0.934463\n661 1 0.626559 0.000484791 0.627034\n1793 1 0.996006 0.484571 0.754541\n548 1 0.0042625 0.185055 0.564697\n705 1 0.00795119 0.247313 0.633949\n576 1 0.880031 0.183654 0.562893\n607 1 0.945213 0.250019 0.574366\n608 1 0.878752 0.304551 0.573346\n702 1 0.945816 0.188006 0.634644\n733 1 0.87456 0.241515 0.638212\n734 1 0.939441 0.319223 0.637954\n582 1 0.191731 0.305239 0.497595\n114 1 0.57076 0.449494 0.997252\n569 1 0.753676 0.125724 0.51479\n611 1 0.0589659 0.371155 0.552918\n616 1 0.110959 0.43862 0.561642\n738 1 0.0591427 0.421152 0.63104\n741 1 0.126098 0.366226 0.609\n737 1 0.00519757 0.364963 0.617639\n42 1 0.308816 0.189912 1.00184\n1801 1 0.241194 0.491972 0.752468\n549 1 0.124714 0.123612 0.504035\n638 1 0.9427 0.423314 0.500605\n1795 1 0.0798952 0.50077 0.806352\n615 1 0.189443 0.350305 0.561701\n619 1 0.32632 0.365718 0.556359\n620 1 0.261696 0.425858 0.555777\n742 1 0.200024 0.431295 0.618819\n745 1 0.262514 0.358467 0.615278\n746 1 0.321881 0.423414 0.609587\n62 1 0.936565 0.194836 0.995879\n927 1 0.927834 0.0050196 0.930338\n647 1 0.194767 
0.00308285 0.686708\n623 1 0.446301 0.356272 0.575178\n624 1 0.384693 0.429706 0.558568\n749 1 0.388014 0.368198 0.63455\n750 1 0.427655 0.443877 0.637205\n789 1 0.633119 0.0113925 0.743067\n118 1 0.693371 0.433829 0.99837\n793 1 0.746829 0.00912197 0.749596\n530 1 0.560277 0.0653703 0.502939\n541 1 0.87264 0.000483334 0.499698\n628 1 0.479452 0.42871 0.552224\n753 1 0.508283 0.371353 0.616624\n627 1 0.5725 0.371505 0.560323\n632 1 0.618899 0.441187 0.566306\n754 1 0.536276 0.447316 0.603302\n757 1 0.620296 0.387388 0.620955\n625 1 0.505561 0.370067 0.509726\n631 1 0.685935 0.370053 0.560614\n636 1 0.742775 0.431522 0.56053\n758 1 0.668282 0.441908 0.630658\n761 1 0.744234 0.378059 0.613696\n762 1 0.803772 0.438264 0.626331\n635 1 0.829306 0.362481 0.563769\n919 1 0.686683 0.00288297 0.948113\n897 1 1.00126 0.0152432 0.867365\n1671 1 0.184022 0.484898 0.672949\n1665 1 0.988401 0.491262 0.650882\n612 1 0.993274 0.431294 0.570367\n639 1 0.93155 0.362352 0.560425\n640 1 0.881995 0.430557 0.558822\n765 1 0.879353 0.373671 0.623683\n766 1 0.930571 0.434037 0.639399\n1021 1 0.863251 0.380186 0.875939\n46 1 0.437599 0.182515 0.994737\n675 1 0.0695834 0.123738 0.68783\n648 1 0.12634 0.0582919 0.683276\n770 1 0.0648734 0.0546171 0.761085\n805 1 0.111493 0.125689 0.763015\n776 1 0.130526 0.0643865 0.817327\n803 1 0.0582228 0.124866 0.821419\n518 1 0.189343 0.0678568 0.522961\n772 1 0.999197 0.0728225 0.809949\n613 1 0.12934 0.372237 0.509516\n774 1 0.205574 0.0581839 0.746462\n679 1 0.176286 0.13379 0.694977\n807 1 0.187328 0.128922 0.800503\n652 1 0.263206 0.0536113 0.693804\n683 1 0.319462 0.132762 0.691907\n778 1 0.310093 0.0664434 0.750302\n780 1 0.251672 0.0680278 0.818581\n809 1 0.257045 0.130103 0.751678\n811 1 0.308953 0.129778 0.815355\n1927 1 0.177492 0.490945 0.944433\n687 1 0.437809 0.135074 0.701602\n656 1 0.381683 0.0572173 0.691041\n782 1 0.445393 0.0600293 0.763184\n784 1 0.365069 0.0587097 0.810888\n813 1 0.379319 0.12349 0.756901\n815 1 0.443808 0.121919 0.814987\n788 1 0.506568 0.0555537 0.820425\n923 1 0.807105 0.0107831 0.940682\n660 1 0.501312 0.0614201 0.690428\n817 1 0.502402 0.128513 0.757558\n664 1 0.618135 0.0663291 0.690149\n691 1 0.56004 0.125495 0.691233\n786 1 0.56466 0.068407 0.755709\n819 1 0.560328 0.122455 0.815474\n821 1 0.626228 0.125539 0.762982\n792 1 0.620942 0.0691399 0.816997\n539 1 0.815726 0.00343228 0.561163\n1019 1 0.801515 0.381963 0.942037\n1024 1 0.890191 0.442669 0.938776\n1929 1 0.250897 0.500074 0.883178\n695 1 0.69099 0.119127 0.697062\n699 1 0.803814 0.134229 0.681247\n790 1 0.688727 0.0629733 0.751189\n668 1 0.743568 0.0677211 0.67635\n794 1 0.808233 0.0585816 0.750065\n796 1 0.750273 0.063013 0.80975\n823 1 0.683157 0.124458 0.813009\n825 1 0.74964 0.121352 0.742366\n827 1 0.80306 0.130363 0.813486\n529 1 0.501183 0.00648414 0.506452\n1667 1 0.0702545 0.491452 0.701943\n801 1 0.017878 0.12229 0.751484\n644 1 0.00780911 0.066073 0.700848\n672 1 0.870123 0.0748916 0.695052\n703 1 0.94919 0.115635 0.707172\n798 1 0.94061 0.0514792 0.753121\n800 1 0.86633 0.0578959 0.811779\n829 1 0.868083 0.124995 0.7665\n831 1 0.940735 0.125737 0.813735\n707 1 0.0669627 0.25393 0.688929\n712 1 0.123995 0.319381 0.677755\n808 1 0.133606 0.193717 0.81744\n680 1 0.126292 0.191531 0.697509\n802 1 0.0580881 0.194035 0.756614\n834 1 0.0539715 0.30961 0.754408\n835 1 0.0702148 0.251113 0.81234\n837 1 0.122527 0.258933 0.750769\n840 1 0.125237 0.310903 0.824252\n716 1 0.270661 0.320983 0.684817\n715 1 0.313564 0.241916 0.690276\n839 1 0.185474 0.259669 0.829304\n684 1 
0.251713 0.202551 0.681179\n711 1 0.18955 0.253114 0.689188\n806 1 0.189343 0.198419 0.745919\n810 1 0.320624 0.191992 0.753735\n838 1 0.190592 0.313895 0.744657\n841 1 0.250874 0.243018 0.761431\n842 1 0.31399 0.295308 0.746015\n844 1 0.254736 0.310935 0.808023\n843 1 0.307189 0.253893 0.811603\n812 1 0.247307 0.187653 0.814995\n720 1 0.383342 0.306655 0.690439\n688 1 0.378867 0.195841 0.698353\n719 1 0.453679 0.25657 0.69955\n814 1 0.452301 0.194312 0.760686\n816 1 0.376947 0.178423 0.827594\n845 1 0.387651 0.249562 0.751261\n846 1 0.440361 0.315926 0.749525\n848 1 0.379811 0.315076 0.802185\n847 1 0.43264 0.251706 0.817377\n724 1 0.505445 0.308845 0.68375\n852 1 0.500485 0.306224 0.817565\n820 1 0.49086 0.194048 0.829385\n692 1 0.499192 0.179909 0.686996\n849 1 0.51071 0.245278 0.763146\n818 1 0.570153 0.182568 0.741173\n696 1 0.641398 0.182927 0.685012\n723 1 0.567049 0.250101 0.692472\n824 1 0.628044 0.196963 0.823274\n853 1 0.634362 0.239732 0.758002\n856 1 0.621285 0.320019 0.814337\n850 1 0.55851 0.306778 0.74992\n851 1 0.566694 0.256725 0.825507\n728 1 0.632984 0.323653 0.692285\n822 1 0.68885 0.184818 0.757259\n700 1 0.743836 0.191384 0.697881\n826 1 0.818976 0.184078 0.753628\n727 1 0.686144 0.258258 0.689294\n731 1 0.815606 0.247323 0.702101\n854 1 0.675718 0.307933 0.752753\n857 1 0.746206 0.258693 0.746633\n859 1 0.802904 0.261844 0.8141\n860 1 0.73886 0.321248 0.826298\n828 1 0.749299 0.198793 0.817685\n855 1 0.69081 0.25198 0.814816\n858 1 0.799329 0.320465 0.762513\n732 1 0.7399 0.32227 0.693956\n708 1 0.998825 0.306306 0.69385\n804 1 0.996802 0.181345 0.8063\n836 1 0.996772 0.311601 0.818859\n676 1 0.00684498 0.181565 0.692723\n833 1 -0.000332978 0.259737 0.760563\n735 1 0.944316 0.251927 0.7048\n830 1 0.937192 0.18658 0.755637\n704 1 0.871334 0.181069 0.698997\n736 1 0.872321 0.318292 0.682439\n832 1 0.866169 0.185238 0.810259\n861 1 0.877688 0.25643 0.744652\n864 1 0.870367 0.324343 0.817676\n863 1 0.924774 0.252404 0.803592\n862 1 0.937565 0.3101 0.753295\n34 1 0.0817242 0.180423 0.996098\n739 1 0.0514445 0.373116 0.701635\n744 1 0.135686 0.404834 0.689589\n866 1 0.0677881 0.4354 0.757566\n869 1 0.127634 0.364291 0.755621\n872 1 0.136041 0.439361 0.798672\n867 1 0.0757071 0.377875 0.810963\n865 1 0.995713 0.373477 0.768842\n1543 1 0.187994 0.484944 0.56052\n1022 1 0.937146 0.428828 0.861333\n899 1 0.0552067 -0.00136842 0.929307\n633 1 0.75307 0.379665 0.504484\n743 1 0.19695 0.361827 0.663307\n747 1 0.310986 0.38961 0.685953\n748 1 0.247876 0.439201 0.693589\n870 1 0.18863 0.425039 0.746142\n871 1 0.195549 0.363326 0.808324\n873 1 0.250941 0.372209 0.749786\n874 1 0.307901 0.439774 0.753559\n875 1 0.321365 0.369122 0.805342\n876 1 0.245957 0.428628 0.815104\n1799 1 0.197322 0.496355 0.823153\n109 1 0.377572 0.375775 1.00226\n797 1 0.864488 0.00739925 0.740892\n751 1 0.453706 0.370432 0.691569\n752 1 0.374416 0.443539 0.701449\n877 1 0.371672 0.371114 0.741139\n878 1 0.449601 0.436262 0.735531\n879 1 0.434897 0.37147 0.802382\n880 1 0.38348 0.438485 0.808559\n756 1 0.510034 0.43142 0.682118\n1017 1 0.765587 0.386833 0.870317\n881 1 0.510223 0.372983 0.748723\n884 1 0.495834 0.429418 0.806619\n755 1 0.563993 0.377886 0.681078\n760 1 0.611572 0.446979 0.694808\n882 1 0.55866 0.439022 0.751724\n883 1 0.554917 0.375676 0.828504\n885 1 0.617291 0.381724 0.757394\n888 1 0.615151 0.440755 0.812042\n887 1 0.68787 0.380674 0.816935\n759 1 0.673878 0.391367 0.685545\n763 1 0.797642 0.370948 0.689143\n764 1 0.741408 0.435976 0.697203\n886 1 0.67265 0.436283 0.752501\n889 1 
0.735456 0.374061 0.750919\n890 1 0.794701 0.423626 0.745325\n892 1 0.7459 0.440172 0.802061\n891 1 0.809663 0.377387 0.809343\n125 1 0.874302 0.367578 0.990146\n740 1 0.995782 0.426671 0.690559\n868 1 0.00494423 0.434499 0.811835\n767 1 0.929546 0.367931 0.700073\n768 1 0.858297 0.434147 0.685045\n893 1 0.860196 0.378294 0.74083\n894 1 0.924764 0.430266 0.748432\n895 1 0.925601 0.374401 0.81032\n896 1 0.858918 0.439914 0.810478\n1020 1 0.752309 0.446778 0.931014\n643 1 0.0672311 0.00676494 0.701434\n1675 1 0.317221 0.484658 0.663423\n898 1 0.0613656 0.0630173 0.868226\n904 1 0.123655 0.0751259 0.921774\n931 1 0.0694937 0.135335 0.930972\n933 1 0.138094 0.135103 0.870813\n929 1 0.00236131 0.130199 0.877928\n900 1 0.994383 0.0749211 0.942648\n523 1 0.317054 0.014835 0.565368\n795 1 0.80383 0.0104227 0.821815\n902 1 0.191486 0.0644111 0.882345\n908 1 0.260042 0.0628228 0.941534\n935 1 0.197159 0.130472 0.930539\n937 1 0.25864 0.125522 0.876892\n939 1 0.311682 0.122772 0.93632\n906 1 0.309172 0.061501 0.867961\n657 1 0.503991 0.00918264 0.628989\n1817 1 0.74529 0.500766 0.74829\n26 1 0.812734 0.0634912 0.996121\n535 1 0.689673 0.00759371 0.555853\n910 1 0.443395 0.0481133 0.869012\n912 1 0.384922 0.0527096 0.936597\n941 1 0.379162 0.112514 0.877704\n943 1 0.437362 0.112478 0.95304\n945 1 0.496657 0.123595 0.880795\n916 1 0.503603 0.0624516 0.933307\n925 1 0.854719 0.00452411 0.878801\n1551 1 0.427879 0.490271 0.565312\n1023 1 0.939769 0.374638 0.925577\n93 1 0.874642 0.254823 0.987877\n1925 1 0.13666 0.497285 0.870083\n914 1 0.561651 0.0579226 0.876146\n920 1 0.615684 0.0519085 0.935807\n947 1 0.559551 0.123682 0.931625\n949 1 0.616019 0.124332 0.87368\n1537 1 0.997004 0.495193 0.505349\n73 1 0.252002 0.248931 0.996833\n573 1 0.884574 0.122166 0.502371\n1005 1 0.372345 0.380741 0.870639\n1669 1 0.115979 0.493673 0.636089\n1016 1 0.631721 0.444565 0.939791\n1009 1 0.488395 0.368177 0.87686\n918 1 0.676665 0.0653261 0.884339\n924 1 0.739446 0.0646824 0.937612\n951 1 0.677455 0.129606 0.938196\n953 1 0.741824 0.128242 0.879599\n922 1 0.807514 0.0727506 0.876135\n955 1 0.804646 0.12974 0.943862\n22 1 0.675882 0.065562 0.997967\n1695 1 0.927657 0.491638 0.700228\n1014 1 0.689265 0.444283 0.861571\n1006 1 0.438934 0.450173 0.862196\n781 1 0.371686 0.00500199 0.750594\n94 1 0.942767 0.308913 0.993551\n926 1 0.92827 0.0655167 0.877046\n928 1 0.865281 0.0710357 0.929173\n957 1 0.868745 0.12817 0.872464\n959 1 0.938038 0.133409 0.924102\n1545 1 0.256376 0.497637 0.503224\n659 1 0.561049 0.00831303 0.691219\n930 1 0.0543391 0.20128 0.865714\n936 1 0.132334 0.188641 0.939134\n963 1 0.0688132 0.254779 0.941527\n965 1 0.120503 0.257652 0.886449\n968 1 0.129665 0.322635 0.937982\n962 1 0.051998 0.307924 0.880302\n932 1 0.997741 0.198185 0.941885\n964 1 0.997716 0.312824 0.939553\n1567 1 0.938794 0.487399 0.561508\n966 1 0.191549 0.319672 0.88808\n934 1 0.197489 0.191123 0.880979\n938 1 0.311915 0.195425 0.875408\n940 1 0.259947 0.18993 0.941708\n967 1 0.191033 0.253178 0.93274\n969 1 0.251958 0.254364 0.872047\n970 1 0.32039 0.311859 0.862201\n971 1 0.31365 0.25703 0.938101\n972 1 0.258339 0.308182 0.938376\n533 1 0.6129 0.00353819 0.506633\n2 1 0.0647357 0.0502504 0.99087\n976 1 0.363557 0.321856 0.938613\n973 1 0.375356 0.250306 0.864665\n944 1 0.368819 0.179409 0.9388\n942 1 0.441374 0.175044 0.895025\n975 1 0.43699 0.253035 0.937368\n974 1 0.431345 0.310733 0.875805\n948 1 0.503838 0.197166 0.943427\n977 1 0.497884 0.254112 0.885279\n1012 1 0.499641 0.460274 0.935019\n1941 1 0.627182 0.497764 
0.869112\n980 1 0.50252 0.314056 0.931585\n946 1 0.555033 0.1845 0.876698\n984 1 0.626371 0.306688 0.946601\n978 1 0.566051 0.319889 0.879251\n981 1 0.617845 0.256429 0.89202\n979 1 0.560126 0.250232 0.942411\n952 1 0.619104 0.183156 0.935559\n1008 1 0.385711 0.438677 0.931999\n1011 1 0.559626 0.380224 0.940523\n983 1 0.681466 0.235702 0.9456\n988 1 0.740978 0.307487 0.941992\n954 1 0.810894 0.192618 0.886828\n982 1 0.682331 0.309511 0.887534\n986 1 0.799991 0.301807 0.889284\n987 1 0.808134 0.24226 0.942136\n985 1 0.737115 0.245808 0.880955\n950 1 0.687989 0.186515 0.876156\n956 1 0.742123 0.187264 0.948549\n961 1 0.988083 0.244268 0.851732\n960 1 0.875745 0.190876 0.936923\n992 1 0.882739 0.311569 0.935379\n991 1 0.939487 0.247349 0.929848\n990 1 0.932192 0.312302 0.868165\n989 1 0.868313 0.255012 0.879209\n958 1 0.927933 0.191616 0.863856\n1010 1 0.556412 0.442261 0.874649\n570 1 0.813858 0.176244 0.500264\n66 1 0.0698777 0.327025 1.00156\n993 1 0.99814 0.373222 0.870495\n996 1 0.00811803 0.457013 0.922\n995 1 0.0698222 0.377218 0.933566\n1000 1 0.128727 0.428116 0.938659\n994 1 0.0785669 0.439214 0.869247\n997 1 0.13216 0.373596 0.872516\n1015 1 0.691528 0.379211 0.931262\n117 1 0.625398 0.381386 0.992024\n998 1 0.186941 0.432743 0.880603\n1018 1 0.81509 0.449319 0.877918\n999 1 0.200754 0.378231 0.94332\n553 1 0.24495 0.124791 0.512439\n1003 1 0.314347 0.378686 0.927802\n1004 1 0.253811 0.440098 0.940141\n1001 1 0.255648 0.366124 0.875598\n1002 1 0.330136 0.451289 0.872533\n1013 1 0.626954 0.383265 0.876812\n665 1 0.739597 0.00308836 0.626055\n1007 1 0.439974 0.375165 0.932601\n667 1 0.798982 0.00391006 0.689776\n651 1 0.32286 0.00444817 0.683585\n581 1 0.120313 0.236194 0.519289\n61 1 0.869894 0.124039 0.984871\n69 1 0.132798 0.24778 0.991381\n769 1 0.00247514 0.00606499 0.756186\n641 1 0.994021 0.00782668 0.636512\n921 1 0.739977 0.0133991 0.873053\n645 1 0.125911 0.0133496 0.62217\n609 1 0.993693 0.368105 0.499501\n534 1 0.694205 0.0644564 0.508526\n634 1 0.802983 0.442205 0.511319\n537 1 0.755935 0.00980972 0.497739\n1687 1 0.670713 0.497372 0.68666\n58 1 0.821456 0.18816 0.995762\n543 1 0.933483 0.00718876 0.565534\n515 1 0.0687268 0.00211202 0.558718\n81 1 0.49411 0.259264 0.995162\n605 1 0.871823 0.238154 0.507667\n1539 1 0.0479169 0.493943 0.591345\n787 1 0.571082 0.000748198 0.811635\n6 1 0.198467 0.0532124 0.998825\n54 1 0.675001 0.178463 0.999837\n1673 1 0.248195 0.492862 0.621208\n1823 1 0.927538 0.491391 0.808734\n649 1 0.250452 0.00714321 0.624116\n30 1 0.933437 0.0523442 0.996387\n653 1 0.37895 0.00997412 0.61564\n113 1 0.502067 0.376188 0.994431\n561 1 0.498244 0.119812 0.507348\n70 1 0.186391 0.308723 0.997168\n597 1 0.61723 0.247419 0.502972\n557 1 0.370899 0.124992 0.50278\n1685 1 0.589002 0.497343 0.63002\n1033 1 0.244702 0.498192 0.995414\n799 1 0.929589 0.0045761 0.830962\n773 1 0.129405 0.00972938 0.757891\n531 1 0.564092 0.00327346 0.569308\n917 1 0.630032 0.00269852 0.872503\n78 1 0.439115 0.321842 0.996373\n97 1 0.00461453 0.374074 0.998675\n53 1 0.61427 0.117742 0.997112\n86 1 0.682292 0.317677 0.996064\n1557 1 0.62951 0.487546 0.503293\n29 1 0.866811 0.00194934 0.987609\n574 1 0.944124 0.188366 0.502859\n1541 1 0.119126 0.497553 0.504977\n106 1 0.305832 0.428343 0.993804\n77 1 0.375994 0.239108 0.993437\n517 1 0.137691 0.000757272 0.511744\n629 1 0.637312 0.369592 0.500184\n1032 1 0.133235 0.561034 0.0650331\n1059 1 0.0586676 0.637557 0.0726162\n1154 1 0.0739497 0.580621 0.127382\n1189 1 0.131222 0.62961 0.121198\n1550 1 0.439669 0.54319 0.492639\n513 1 
1.00283 0.998181 0.501329\n1114 1 0.817906 0.815522 0.00978496\n269 1 0.373546 0.995092 0.256814\n1036 1 0.241128 0.560142 0.0493492\n1063 1 0.191115 0.630051 0.0563151\n1158 1 0.186827 0.57201 0.127304\n1162 1 0.300596 0.561395 0.123536\n1193 1 0.253364 0.637328 0.126995\n1067 1 0.301713 0.612293 0.0517501\n1625 1 0.76096 0.75115 0.488216\n1295 1 0.422838 0.507177 0.316779\n1040 1 0.375127 0.543647 0.0710112\n1071 1 0.437419 0.619215 0.0694525\n1166 1 0.433408 0.560372 0.130668\n1197 1 0.370957 0.622012 0.124942\n1201 1 0.494322 0.631289 0.12229\n1505 1 0.000407664 0.883093 0.378181\n1175 1 0.687789 0.504505 0.178251\n1055 1 0.950092 0.507398 0.0623324\n1044 1 0.495512 0.563475 0.0650434\n1048 1 0.63466 0.569894 0.0551154\n1075 1 0.562129 0.612496 0.0498027\n1170 1 0.573004 0.562863 0.116387\n1205 1 0.629269 0.630995 0.117921\n1411 1 0.0612997 0.498157 0.442046\n151 1 0.687651 0.990553 0.16032\n1605 1 0.104331 0.760778 0.493799\n1050 1 0.819265 0.572087 0.0114147\n139 1 0.322415 0.998076 0.194765\n1079 1 0.687893 0.634514 0.0755269\n1052 1 0.753471 0.572607 0.0574249\n1083 1 0.813201 0.628993 0.0736585\n1174 1 0.687942 0.566189 0.126013\n1178 1 0.813546 0.553115 0.120907\n1209 1 0.751518 0.624505 0.126565\n1641 1 0.245393 0.878659 0.492431\n415 1 0.942326 0.998241 0.442503\n1028 1 0.00816142 0.565529 0.0751015\n1185 1 0.00180562 0.631734 0.126592\n1056 1 0.879923 0.565408 0.0750482\n1087 1 0.951882 0.632446 0.0645866\n1182 1 0.942712 0.568383 0.124745\n1213 1 0.885437 0.634596 0.122144\n413 1 0.886334 0.99818 0.376267\n1163 1 0.300421 0.508392 0.18435\n1106 1 0.557258 0.820564 0.00666078\n1598 1 0.93495 0.690689 0.495428\n1064 1 0.127839 0.693499 0.0685189\n1091 1 0.0699725 0.764429 0.0740933\n1186 1 0.0614845 0.698755 0.128874\n1221 1 0.129525 0.746583 0.133732\n1096 1 0.136137 0.808968 0.0632818\n1218 1 0.0523898 0.826014 0.124584\n1060 1 0.995243 0.701652 0.0718728\n1092 1 0.995025 0.822291 0.0655567\n1281 1 0.992782 0.502444 0.244237\n1194 1 0.315522 0.701094 0.130761\n1068 1 0.245301 0.689668 0.0684342\n1095 1 0.195861 0.755594 0.0599096\n1099 1 0.308075 0.751091 0.0679935\n1190 1 0.190031 0.683934 0.121778\n1222 1 0.194195 0.82527 0.127545\n1225 1 0.25113 0.751185 0.122934\n1100 1 0.254734 0.810956 0.0617377\n1226 1 0.305526 0.808524 0.125233\n1601 1 0.00648317 0.750225 0.497759\n1072 1 0.361005 0.679988 0.0676568\n1103 1 0.442766 0.745477 0.0579489\n1104 1 0.372086 0.824643 0.0675941\n1198 1 0.427271 0.679658 0.120866\n1229 1 0.368399 0.760499 0.119022\n1230 1 0.446162 0.806941 0.130616\n1076 1 0.485598 0.68114 0.0617381\n1233 1 0.507083 0.75477 0.12522\n1108 1 0.498319 0.814404 0.0755306\n27 1 0.815712 0.999977 0.0587262\n1080 1 0.6163 0.669915 0.0458205\n1107 1 0.550679 0.754196 0.0576603\n1202 1 0.564899 0.678069 0.123355\n1234 1 0.569116 0.813227 0.129211\n1237 1 0.619634 0.749381 0.120215\n1112 1 0.625733 0.804603 0.0626961\n1653 1 0.630191 0.865702 0.498885\n1084 1 0.756306 0.673925 0.0606281\n1206 1 0.680165 0.698047 0.122837\n1111 1 0.687163 0.739551 0.0548369\n1115 1 0.816287 0.747896 0.071034\n1116 1 0.743666 0.818373 0.0639308\n1210 1 0.810691 0.693285 0.133089\n1238 1 0.691879 0.809235 0.122353\n1241 1 0.746861 0.750536 0.129485\n1242 1 0.81633 0.813066 0.104476\n1590 1 0.702385 0.693742 0.495736\n1217 1 0.00655718 0.756247 0.131706\n1214 1 0.939987 0.691333 0.124311\n1088 1 0.871121 0.694066 0.0678465\n1119 1 0.937299 0.752056 0.0664051\n1120 1 0.886809 0.807588 0.0580872\n1245 1 0.879082 0.750244 0.136912\n1246 1 0.939288 0.80936 0.126067\n1070 1 0.42958 0.686464 
0.00650336\n1123 1 0.0596838 0.887568 0.0663159\n1128 1 0.122979 0.944939 0.0572074\n1250 1 0.0666657 0.943325 0.12586\n1253 1 0.121937 0.874647 0.114281\n1047 1 0.68657 0.505201 0.0554006\n1124 1 0.0031593 0.93658 0.0666966\n287 1 0.937224 0.994572 0.306754\n1536 1 0.869799 0.951279 0.430329\n133 1 0.122048 0.996789 0.136105\n1586 1 0.570195 0.707681 0.499556\n1027 1 0.0640324 0.513958 0.0707043\n1127 1 0.190401 0.87645 0.0657779\n1131 1 0.31179 0.874753 0.0635236\n1132 1 0.245847 0.935893 0.0728158\n1254 1 0.182995 0.940973 0.129101\n1257 1 0.258976 0.876393 0.13629\n1258 1 0.32436 0.930958 0.132833\n149 1 0.616507 0.992052 0.104045\n1135 1 0.437595 0.867413 0.0654568\n1136 1 0.365456 0.931483 0.0513803\n1261 1 0.37006 0.866144 0.134021\n1262 1 0.434643 0.939427 0.126459\n1299 1 0.563008 0.501009 0.30015\n281 1 0.745122 1.00039 0.26209\n129 1 1.0006 0.993881 0.137773\n1140 1 0.49759 0.929771 0.0649851\n1265 1 0.498347 0.873021 0.13379\n1139 1 0.552353 0.869766 0.0739318\n1144 1 0.614542 0.931442 0.0570055\n1266 1 0.565693 0.923317 0.125338\n1269 1 0.626279 0.866736 0.117354\n1549 1 0.377392 0.495945 0.497437\n1057 1 0.0100486 0.643233 0.000380593\n1143 1 0.684961 0.873324 0.0537199\n1148 1 0.752381 0.936153 0.0626681\n1270 1 0.676906 0.92777 0.105346\n1273 1 0.742616 0.874345 0.125777\n1274 1 0.81732 0.94831 0.132697\n1147 1 0.808221 0.879711 0.0679622\n1293 1 0.381731 0.502761 0.247338\n1249 1 0.989633 0.889199 0.12485\n1029 1 0.121273 0.501545 0.00901012\n1618 1 0.557474 0.805777 0.496329\n1151 1 0.929835 0.88225 0.0652406\n1152 1 0.876235 0.952498 0.071984\n1277 1 0.866018 0.877353 0.124521\n1278 1 0.93645 0.940872 0.128817\n1533 1 0.886348 0.891964 0.372833\n1160 1 0.124317 0.55187 0.18899\n1187 1 0.0596389 0.62767 0.190236\n1282 1 0.0554381 0.552936 0.246384\n1288 1 0.108349 0.57531 0.318683\n1315 1 0.0401748 0.615772 0.323041\n1317 1 0.120263 0.636554 0.248692\n1156 1 0.999082 0.576276 0.195346\n1191 1 0.177629 0.634601 0.190221\n1319 1 0.176227 0.631448 0.306555\n1164 1 0.239083 0.567913 0.19813\n1195 1 0.314153 0.623003 0.180706\n1286 1 0.169737 0.561683 0.265295\n1292 1 0.237911 0.562719 0.306394\n1321 1 0.244819 0.634852 0.250765\n1323 1 0.305677 0.623025 0.301422\n1290 1 0.306611 0.559565 0.250778\n271 1 0.425484 0.985969 0.309516\n1413 1 0.137925 0.501129 0.385215\n1291 1 0.318934 0.510607 0.307187\n1168 1 0.36209 0.565308 0.187143\n1199 1 0.426877 0.623958 0.188477\n1294 1 0.436035 0.559093 0.250092\n1296 1 0.370406 0.570188 0.320039\n1325 1 0.373233 0.620369 0.24623\n1327 1 0.434114 0.622123 0.305384\n1329 1 0.49739 0.621183 0.245113\n1172 1 0.497511 0.561766 0.18752\n1300 1 0.495549 0.560016 0.299613\n137 1 0.251871 0.993853 0.127691\n1102 1 0.443431 0.81075 0.00129525\n1514 1 0.315598 0.927065 0.38465\n1203 1 0.552921 0.619137 0.182589\n1176 1 0.62598 0.568962 0.189452\n1298 1 0.560023 0.565583 0.236581\n1331 1 0.560577 0.624349 0.306652\n1333 1 0.616072 0.623112 0.251498\n1304 1 0.622794 0.553174 0.304471\n1602 1 0.0522865 0.822853 0.486641\n1511 1 0.181348 0.875148 0.433815\n1303 1 0.697441 0.515133 0.315075\n1510 1 0.182922 0.927564 0.370463\n1153 1 0.00159339 0.510011 0.132246\n1302 1 0.69164 0.571547 0.243349\n1180 1 0.748706 0.556252 0.190707\n1207 1 0.689478 0.620189 0.189493\n1211 1 0.805694 0.6306 0.183435\n1308 1 0.775273 0.56337 0.301551\n1335 1 0.692102 0.615278 0.306401\n1337 1 0.749545 0.63156 0.245447\n1339 1 0.819866 0.635292 0.309812\n1306 1 0.811308 0.55803 0.231348\n1513 1 0.244071 0.864521 0.38595\n1171 1 0.566545 0.500389 0.181876\n1621 1 0.631923 
0.762306 0.488169\n1313 1 0.994634 0.631765 0.246013\n1284 1 0.995696 0.551982 0.311974\n1184 1 0.875629 0.547982 0.185209\n1215 1 0.933226 0.632645 0.183578\n1310 1 0.936715 0.562159 0.238991\n1312 1 0.865766 0.561073 0.299948\n1341 1 0.869597 0.620156 0.243452\n1343 1 0.917332 0.619389 0.305895\n1110 1 0.674344 0.813122 0.00331751\n1219 1 0.0568602 0.751833 0.194018\n1314 1 0.065473 0.693911 0.260349\n1320 1 0.115569 0.687513 0.318674\n1192 1 0.11853 0.692114 0.1844\n1346 1 0.0543641 0.807616 0.255415\n1349 1 0.127602 0.743393 0.256227\n1347 1 0.0551803 0.74417 0.320359\n1352 1 0.107359 0.802974 0.31769\n1220 1 0.000596447 0.809414 0.188338\n1224 1 0.116505 0.817024 0.180787\n1196 1 0.250121 0.686047 0.187921\n1324 1 0.259392 0.687834 0.310058\n1223 1 0.193646 0.742251 0.191136\n1228 1 0.25118 0.810718 0.181423\n1227 1 0.313994 0.760278 0.197984\n1318 1 0.188166 0.692449 0.251458\n1322 1 0.319501 0.693092 0.262319\n1350 1 0.194828 0.805546 0.253487\n1353 1 0.249945 0.750372 0.261307\n1355 1 0.310315 0.752825 0.317228\n1354 1 0.305525 0.815172 0.263189\n1351 1 0.187248 0.7546 0.309297\n1356 1 0.249897 0.814006 0.319405\n1232 1 0.379128 0.80414 0.194055\n1328 1 0.381634 0.682087 0.307549\n1200 1 0.373399 0.684068 0.19405\n1231 1 0.43115 0.745856 0.179053\n1326 1 0.447724 0.686786 0.248555\n1357 1 0.38776 0.745944 0.255059\n1358 1 0.441151 0.807455 0.245041\n1359 1 0.444851 0.742305 0.313211\n1360 1 0.38706 0.804325 0.302943\n1204 1 0.494516 0.6813 0.190735\n1332 1 0.493796 0.668262 0.314506\n1361 1 0.495211 0.746843 0.237131\n1364 1 0.506483 0.821925 0.306303\n1236 1 0.506984 0.814988 0.182385\n1208 1 0.627024 0.675718 0.187371\n1240 1 0.643076 0.817108 0.185844\n1235 1 0.569772 0.73934 0.179238\n1330 1 0.558555 0.677535 0.237407\n1336 1 0.625739 0.683716 0.308077\n1363 1 0.553606 0.738302 0.306793\n1365 1 0.616654 0.74081 0.24343\n1368 1 0.629558 0.805763 0.314848\n1362 1 0.555745 0.799449 0.236506\n1338 1 0.818583 0.690248 0.250477\n1212 1 0.747121 0.69403 0.192478\n1334 1 0.680354 0.68186 0.245354\n1239 1 0.686731 0.751977 0.184975\n1244 1 0.749028 0.814725 0.190383\n1366 1 0.686083 0.807072 0.256261\n1367 1 0.701421 0.74206 0.304875\n1369 1 0.754698 0.75545 0.253756\n1370 1 0.809227 0.811373 0.258672\n1371 1 0.822831 0.753225 0.318935\n1340 1 0.762237 0.68408 0.304886\n1243 1 0.814836 0.747858 0.192434\n1372 1 0.753379 0.810408 0.320684\n1348 1 1.0001 0.815635 0.314349\n1188 1 0.00642665 0.694041 0.181044\n1316 1 0.991819 0.685395 0.316389\n1345 1 0.998703 0.74541 0.264242\n1342 1 0.941162 0.693956 0.243705\n1216 1 0.876939 0.684748 0.181306\n1247 1 0.940871 0.747191 0.18817\n1373 1 0.869395 0.751679 0.251497\n1374 1 0.938376 0.808246 0.243504\n1375 1 0.946689 0.747105 0.321212\n1376 1 0.88743 0.814741 0.31907\n1344 1 0.884929 0.695426 0.320544\n1248 1 0.860768 0.81931 0.184841\n1301 1 0.631698 0.506035 0.23618\n1515 1 0.328513 0.859559 0.432246\n1256 1 0.115988 0.927083 0.19205\n1251 1 0.056614 0.872527 0.192329\n1378 1 0.059712 0.943552 0.251805\n1379 1 0.0548703 0.871643 0.320705\n1381 1 0.104363 0.873459 0.257873\n1384 1 0.128477 0.945254 0.314676\n1377 1 0.999855 0.875273 0.253319\n277 1 0.63119 0.986901 0.24403\n1382 1 0.188505 0.935543 0.252037\n1259 1 0.319568 0.870486 0.198392\n1260 1 0.268826 0.936053 0.193895\n1383 1 0.187897 0.864096 0.310039\n1255 1 0.185078 0.87475 0.187069\n1385 1 0.245109 0.872171 0.243107\n1386 1 0.31854 0.928998 0.257499\n1387 1 0.305869 0.869828 0.321704\n1388 1 0.251641 0.927537 0.320532\n7 1 0.198564 0.99612 0.0575\n1530 1 0.80992 0.939724 
0.369764\n1043 1 0.562244 0.5123 0.0546134\n1433 1 0.753229 0.505532 0.380009\n1431 1 0.695405 0.508277 0.432965\n1263 1 0.436833 0.881672 0.185239\n1264 1 0.375752 0.93628 0.199638\n1389 1 0.388833 0.872765 0.249295\n1390 1 0.441112 0.940178 0.250485\n1391 1 0.435147 0.867973 0.312681\n1392 1 0.372969 0.923245 0.314112\n1396 1 0.500718 0.92868 0.307549\n1268 1 0.507309 0.944716 0.182893\n15 1 0.426109 0.991152 0.0615466\n393 1 0.240266 0.99118 0.372025\n1169 1 0.512484 0.50253 0.119705\n1393 1 0.501362 0.874548 0.246765\n1272 1 0.625497 0.930828 0.168602\n1267 1 0.568259 0.872729 0.181549\n1394 1 0.566852 0.93386 0.239616\n1395 1 0.579821 0.868334 0.300508\n1397 1 0.629039 0.887544 0.238737\n1400 1 0.627316 0.93338 0.307357\n1137 1 0.497802 0.873823 0.0032286\n1520 1 0.368146 0.933128 0.449441\n1025 1 0.0158237 0.51308 0.00310311\n1518 1 0.437449 0.930498 0.36552\n131 1 0.0650039 1.00232 0.188958\n1271 1 0.697443 0.869624 0.205361\n1275 1 0.806957 0.876447 0.178995\n1276 1 0.754934 0.933319 0.182547\n1398 1 0.698634 0.941057 0.238672\n1401 1 0.763326 0.87927 0.263544\n1402 1 0.810581 0.929448 0.239313\n1399 1 0.690096 0.874319 0.301068\n1403 1 0.818765 0.867487 0.324803\n1404 1 0.747818 0.938377 0.324636\n1380 1 0.00366803 0.940854 0.317312\n1523 1 0.564699 0.870109 0.446106\n1252 1 0.00560034 0.929997 0.193947\n1279 1 0.942155 0.874923 0.191388\n1280 1 0.868857 0.943369 0.186003\n1405 1 0.877198 0.867862 0.239851\n1406 1 0.934559 0.930676 0.243732\n1407 1 0.941966 0.880867 0.310797\n1408 1 0.865163 0.934984 0.3126\n1058 1 0.0693244 0.703718 0.0183677\n1090 1 0.0559742 0.811251 0.00631994\n1410 1 0.042862 0.558842 0.378389\n1416 1 0.111456 0.567036 0.435008\n1443 1 0.0426487 0.622204 0.449186\n1445 1 0.117288 0.631849 0.37358\n1516 1 0.244987 0.943954 0.430471\n1414 1 0.175397 0.56856 0.361875\n1447 1 0.177764 0.628261 0.440314\n1449 1 0.242923 0.615297 0.371379\n1451 1 0.309592 0.632092 0.432086\n1420 1 0.231452 0.56416 0.434838\n1418 1 0.302987 0.565798 0.372536\n1528 1 0.622406 0.95027 0.449106\n1519 1 0.419161 0.867605 0.442696\n1069 1 0.371778 0.62248 0.0106544\n1424 1 0.359625 0.57112 0.423186\n1455 1 0.438769 0.617492 0.44561\n1422 1 0.427495 0.56383 0.384787\n1453 1 0.368762 0.631218 0.370317\n1457 1 0.493041 0.616667 0.381092\n1428 1 0.501729 0.559546 0.434977\n1517 1 0.3723 0.857821 0.3719\n13 1 0.378656 0.994098 -0.000898437\n273 1 0.493627 0.998675 0.244443\n1461 1 0.620828 0.625681 0.360262\n1426 1 0.564159 0.57412 0.373229\n1432 1 0.620649 0.559386 0.437669\n1459 1 0.556569 0.621189 0.440926\n1121 1 0.999149 0.881093 0.0101904\n1522 1 0.563319 0.944453 0.368021\n1535 1 0.925158 0.876444 0.438929\n1532 1 0.768895 0.933039 0.440824\n11 1 0.318685 0.988755 0.0723316\n1525 1 0.629098 0.874014 0.368293\n409 1 0.746377 0.99292 0.395857\n1430 1 0.685249 0.571648 0.375767\n1463 1 0.674916 0.623694 0.453933\n1436 1 0.745899 0.573401 0.430938\n1434 1 0.815125 0.561766 0.369813\n1465 1 0.756727 0.624474 0.363823\n1467 1 0.81923 0.624107 0.432056\n1577 1 0.245904 0.6231 0.492816\n135 1 0.185615 0.993839 0.190334\n1441 1 0.983551 0.62484 0.381598\n1412 1 0.993827 0.556068 0.440082\n1471 1 0.929276 0.606929 0.432789\n1469 1 0.879768 0.631488 0.371088\n1440 1 0.872454 0.553947 0.448168\n1438 1 0.933793 0.545717 0.371935\n275 1 0.557671 0.992471 0.30573\n1085 1 0.883255 0.636118 0.0169433\n261 1 0.124433 0.99248 0.246341\n1524 1 0.482088 0.931854 0.433937\n1442 1 0.0502404 0.683779 0.382258\n1475 1 0.0556208 0.755547 0.426243\n1448 1 0.121797 0.692712 0.441504\n1480 1 0.122575 0.817554 
0.433904\n1477 1 0.1219 0.745246 0.36833\n1474 1 0.0565942 0.81947 0.374376\n1444 1 0.997911 0.686139 0.439719\n1038 1 0.432624 0.567 0.00907785\n1529 1 0.756535 0.866175 0.37623\n403 1 0.561492 0.996937 0.438605\n1446 1 0.19177 0.685558 0.364184\n1484 1 0.242112 0.802929 0.437542\n1452 1 0.252949 0.684086 0.429663\n1482 1 0.317545 0.802525 0.377749\n1481 1 0.247063 0.746369 0.378159\n1479 1 0.187372 0.73618 0.437359\n1483 1 0.311614 0.749258 0.443591\n1450 1 0.304705 0.679851 0.371667\n1478 1 0.179555 0.798902 0.383845\n1297 1 0.497809 0.500808 0.240999\n1534 1 0.949825 0.944416 0.378567\n1507 1 0.068096 0.880509 0.426625\n1456 1 0.377173 0.69294 0.445917\n1454 1 0.434328 0.678464 0.384414\n1485 1 0.374005 0.726527 0.377668\n1486 1 0.436721 0.801911 0.360903\n1487 1 0.440483 0.748544 0.442983\n1488 1 0.384652 0.801439 0.422989\n1460 1 0.504099 0.687529 0.446584\n1492 1 0.497055 0.815032 0.421832\n1521 1 0.502428 0.879231 0.370169\n1489 1 0.494012 0.739515 0.376803\n1491 1 0.561769 0.754256 0.426809\n1464 1 0.616987 0.680419 0.429702\n1496 1 0.621169 0.814891 0.430857\n1490 1 0.565071 0.808268 0.366689\n1458 1 0.556428 0.680281 0.373598\n1493 1 0.630485 0.75092 0.376642\n1526 1 0.685538 0.940053 0.37344\n1527 1 0.690616 0.863922 0.433177\n1462 1 0.695422 0.679148 0.374252\n1495 1 0.69057 0.756665 0.432418\n1494 1 0.69614 0.800915 0.368994\n1497 1 0.752612 0.748243 0.37851\n1466 1 0.81696 0.692955 0.37424\n1468 1 0.752415 0.684987 0.435497\n1498 1 0.825884 0.804665 0.383675\n1499 1 0.812949 0.748268 0.434662\n1500 1 0.754718 0.809002 0.427472\n1508 1 1.00222 0.93214 0.443844\n1531 1 0.838776 0.874303 0.441805\n1476 1 0.000856156 0.827275 0.433421\n1473 1 0.998605 0.763281 0.379959\n1503 1 0.945635 0.752704 0.441636\n1504 1 0.884979 0.801329 0.453017\n1470 1 0.943994 0.69155 0.377458\n1501 1 0.876089 0.753669 0.378416\n1472 1 0.885302 0.685537 0.434468\n1502 1 0.942837 0.817203 0.374036\n1155 1 0.0684256 0.500601 0.17778\n1512 1 0.128278 0.949063 0.429777\n1506 1 0.0620351 0.943838 0.380049\n1509 1 0.121233 0.874431 0.363241\n1159 1 0.192719 0.502829 0.180722\n159 1 0.940083 0.998896 0.191973\n1078 1 0.688184 0.675016 0.00145789\n1287 1 0.180811 0.501548 0.32047\n1086 1 0.941446 0.691557 0.00809947\n1062 1 0.183397 0.702042 0.00587774\n1435 1 0.799277 0.508407 0.451298\n1129 1 0.240927 0.876132 0.00392808\n267 1 0.313198 0.988181 0.31507\n1101 1 0.367541 0.760817 0.0049146\n397 1 0.380415 0.989645 0.379608\n1054 1 0.950118 0.573178 0.00821139\n1146 1 0.814166 0.934567 0.00810057\n395 1 0.312314 0.994487 0.433568\n1098 1 0.320981 0.822353 0.00180813\n1425 1 0.49833 0.508944 0.362397\n1118 1 0.939887 0.818994 -0.000637169\n1145 1 0.7596 0.8725 0.00391901\n1051 1 0.821815 0.508287 0.0551497\n141 1 0.376337 0.993587 0.127887\n145 1 0.497106 0.991163 0.121556\n1610 1 0.306183 0.807197 0.493576\n147 1 0.570846 0.993082 0.173029\n1113 1 0.753949 0.751546 0.00926469\n1637 1 0.114624 0.890239 0.486876\n1581 1 0.371533 0.614468 0.485541\n1429 1 0.636688 0.514651 0.37301\n1626 1 0.808655 0.817852 0.493836\n153 1 0.757336 0.99586 0.118092\n1125 1 0.122904 0.868934 0.00765674\n1638 1 0.183489 0.927924 0.492131\n1181 1 0.887888 0.505015 0.119548\n1031 1 0.183796 0.501276 0.0593412\n1650 1 0.552711 0.933376 0.495678\n1649 1 0.495795 0.879945 0.495413\n1578 1 0.31168 0.677296 0.497131\n1142 1 0.682859 0.933968 0.00299223\n1617 1 0.500274 0.750047 0.496298\n1662 1 0.932927 0.935847 0.49877\n1041 1 0.494601 0.506405 0.00870656\n1646 1 0.433205 0.943392 0.497395\n1546 1 0.31653 0.55352 0.493133\n1053 1 
0.887049 0.520261 0.00890926\n1554 1 0.562882 0.566209 0.496012\n1066 1 0.306269 0.685622 0.00766841\n1613 1 0.377064 0.763467 0.495608\n1657 1 0.754245 0.869688 0.493538\n1544 1 0.121665 0.545454 0.571509\n1571 1 0.063624 0.618332 0.571809\n1666 1 0.0628063 0.559336 0.631791\n1701 1 0.12836 0.620363 0.621311\n1815 1 0.690821 0.502217 0.805573\n655 1 0.438788 0.99353 0.696012\n1677 1 0.372802 0.505904 0.615743\n1670 1 0.175156 0.55122 0.633289\n1548 1 0.249473 0.562585 0.55321\n1575 1 0.187504 0.618762 0.565768\n1579 1 0.315511 0.615202 0.560737\n1674 1 0.306723 0.555726 0.610175\n1705 1 0.242289 0.597475 0.629283\n1634 1 0.0615921 0.941919 0.5009\n1642 1 0.323966 0.931165 0.508913\n1552 1 0.376024 0.560946 0.558733\n1583 1 0.435314 0.62624 0.563682\n1678 1 0.428519 0.557057 0.630561\n1709 1 0.376942 0.615762 0.617361\n1093 1 0.124977 0.757322 0.990251\n1094 1 0.183528 0.816186 0.994199\n907 1 0.310108 0.999604 0.939133\n1681 1 0.497049 0.510205 0.615667\n1126 1 0.180284 0.934612 0.999037\n1713 1 0.496748 0.624594 0.625214\n1556 1 0.495687 0.566301 0.550633\n1560 1 0.612712 0.57473 0.562055\n1587 1 0.548683 0.63202 0.569533\n1682 1 0.551243 0.569872 0.618884\n1717 1 0.622074 0.623607 0.61942\n1542 1 0.179123 0.569012 0.499036\n1945 1 0.748661 0.503078 0.872272\n1049 1 0.754762 0.511397 0.998387\n1686 1 0.674149 0.556437 0.628631\n1591 1 0.67083 0.625669 0.558122\n1564 1 0.742824 0.551138 0.568639\n1595 1 0.807555 0.631617 0.568772\n1690 1 0.803111 0.565823 0.642341\n1721 1 0.738181 0.621418 0.620602\n1089 1 0.99698 0.750824 0.988435\n785 1 0.501597 0.99317 0.766311\n1540 1 0.991709 0.541316 0.572975\n1697 1 0.003793 0.630757 0.624785\n1568 1 0.886957 0.545359 0.572802\n1599 1 0.929644 0.627991 0.582269\n1694 1 0.929944 0.560461 0.637449\n1725 1 0.867096 0.628719 0.628218\n1594 1 0.810791 0.683801 0.503461\n1597 1 0.880726 0.623946 0.506215\n1576 1 0.122725 0.681758 0.562856\n1603 1 0.0666914 0.743317 0.56425\n1608 1 0.115876 0.833292 0.559628\n1698 1 0.0714704 0.688873 0.630708\n1730 1 0.0740393 0.813872 0.633165\n1733 1 0.132838 0.745556 0.622476\n1572 1 -0.000556281 0.68961 0.562839\n1604 1 0.017896 0.826916 0.57073\n1563 1 0.804538 0.500098 0.573575\n1706 1 0.310297 0.663052 0.620551\n1580 1 0.244078 0.668006 0.565804\n1734 1 0.182354 0.816014 0.613306\n1607 1 0.179102 0.764018 0.55542\n1611 1 0.301484 0.750436 0.557633\n1702 1 0.178468 0.679754 0.612239\n1737 1 0.242568 0.75622 0.616045\n1612 1 0.240354 0.823968 0.559697\n1738 1 0.306877 0.810414 0.617815\n21 1 0.616387 0.999011 1.00138\n1939 1 0.558736 0.514593 0.921923\n783 1 0.437036 0.997417 0.813286\n1710 1 0.432011 0.683906 0.61647\n1584 1 0.366267 0.678929 0.56088\n1615 1 0.437671 0.759069 0.558858\n1616 1 0.374146 0.815283 0.566781\n1741 1 0.370327 0.746319 0.609901\n1742 1 0.426676 0.812449 0.639123\n1620 1 0.496526 0.82184 0.57332\n1745 1 0.477117 0.7501 0.624268\n1138 1 0.561235 0.930025 0.985825\n1803 1 0.314777 0.506294 0.814359\n1558 1 0.673205 0.55983 0.500578\n1588 1 0.494905 0.695021 0.565324\n1714 1 0.555178 0.693374 0.623648\n1592 1 0.617948 0.698786 0.57071\n1619 1 0.55472 0.748431 0.569215\n1624 1 0.613347 0.810649 0.560916\n1749 1 0.615425 0.752991 0.624358\n1746 1 0.565519 0.817885 0.625453\n1933 1 0.372707 0.518584 0.87092\n1750 1 0.672646 0.817925 0.621548\n1623 1 0.691297 0.752131 0.560673\n1722 1 0.813058 0.694625 0.619634\n1596 1 0.744836 0.679668 0.557279\n1627 1 0.807008 0.753478 0.560398\n1718 1 0.686482 0.691529 0.611673\n1753 1 0.747623 0.743544 0.627421\n1754 1 0.800194 0.819207 0.631896\n1628 1 
0.746602 0.817897 0.57384\n1077 1 0.611278 0.621463 0.983004\n1150 1 0.94327 0.940193 0.994561\n1729 1 0.00888984 0.751953 0.618258\n1600 1 0.875497 0.689167 0.571216\n1631 1 0.941973 0.752289 0.564112\n1632 1 0.881114 0.801619 0.565994\n1726 1 0.950103 0.694073 0.629755\n1757 1 0.866146 0.758128 0.62659\n1758 1 0.94724 0.810832 0.634767\n1046 1 0.683533 0.572403 0.989215\n1805 1 0.357523 0.516155 0.741926\n1117 1 0.878328 0.74766 1.0015\n1635 1 0.0649191 0.899693 0.582867\n1640 1 0.12668 0.940492 0.561636\n1762 1 0.0629821 0.960842 0.635923\n1765 1 0.134139 0.88912 0.625939\n1636 1 1.0014 0.957494 0.566894\n1585 1 0.492549 0.63826 0.509699\n791 1 0.68283 0.984469 0.808128\n519 1 0.194853 0.993809 0.566386\n1065 1 0.247686 0.617587 0.99621\n1030 1 0.175597 0.565255 0.979984\n1811 1 0.554248 0.508686 0.819025\n1133 1 0.38484 0.874308 0.998213\n915 1 0.561452 0.994287 0.937055\n1658 1 0.823704 0.934253 0.506845\n1639 1 0.186577 0.885971 0.564005\n1643 1 0.310415 0.876874 0.569834\n1644 1 0.257177 0.941846 0.553909\n1766 1 0.195955 0.944 0.628182\n1769 1 0.248908 0.885643 0.621869\n1770 1 0.306919 0.953591 0.629603\n1589 1 0.615543 0.643728 0.506151\n1821 1 0.861789 0.50271 0.75528\n1645 1 0.388153 0.870794 0.512407\n1691 1 0.807681 0.500714 0.689278\n1807 1 0.416881 0.511742 0.805746\n1647 1 0.436124 0.882545 0.580881\n1648 1 0.373548 0.936779 0.571827\n1773 1 0.375007 0.875972 0.635365\n1774 1 0.4536 0.947215 0.626206\n1777 1 0.507019 0.878509 0.629222\n1652 1 0.508989 0.944656 0.563082\n2047 1 0.933044 0.880949 0.942604\n1797 1 0.148613 0.503145 0.748645\n1651 1 0.564103 0.886411 0.559446\n1656 1 0.630169 0.937883 0.551064\n1778 1 0.556063 0.942109 0.630978\n1781 1 0.61183 0.882144 0.625104\n1931 1 0.317739 0.50168 0.937694\n1629 1 0.873727 0.740938 0.50593\n909 1 0.379052 0.998461 0.877808\n2033 1 0.501867 0.873195 0.875425\n1614 1 0.444948 0.811565 0.50599\n1813 1 0.612454 0.503909 0.758155\n2048 1 0.876905 0.93835 0.934652\n1655 1 0.684886 0.874827 0.563533\n1659 1 0.814761 0.876564 0.57863\n1660 1 0.757306 0.938 0.573524\n1782 1 0.690981 0.932633 0.615783\n1785 1 0.740195 0.869027 0.627092\n1786 1 0.816008 0.942039 0.626337\n1573 1 0.119821 0.624897 0.50037\n1633 1 0.00912669 0.88797 0.514299\n2046 1 0.937101 0.937738 0.881597\n1761 1 1.0029 0.879276 0.634081\n1951 1 0.951043 0.516274 0.945131\n1663 1 0.941191 0.877032 0.565033\n1664 1 0.874973 0.943519 0.564538\n1789 1 0.881217 0.867339 0.63319\n1790 1 0.937429 0.947413 0.633374\n1555 1 0.572591 0.508618 0.551433\n2040 1 0.627957 0.936811 0.94068\n1672 1 0.119637 0.554987 0.687893\n1699 1 0.0581429 0.626361 0.696799\n1794 1 0.080009 0.555556 0.754465\n1800 1 0.147181 0.568221 0.809946\n1829 1 0.13552 0.623647 0.74892\n1827 1 0.0751359 0.629021 0.815346\n1825 1 0.00157492 0.636229 0.756443\n1796 1 0.0158056 0.567817 0.805539\n1061 1 0.134893 0.638618 0.993651\n2045 1 0.870148 0.878354 0.878613\n1798 1 0.203637 0.566856 0.74595\n1676 1 0.261028 0.54149 0.689414\n1703 1 0.184809 0.626513 0.681924\n1707 1 0.297081 0.624235 0.694102\n1804 1 0.245083 0.566286 0.820387\n1831 1 0.186515 0.641902 0.810338\n1833 1 0.248717 0.643077 0.744924\n1835 1 0.314263 0.643619 0.803904\n1802 1 0.296203 0.56948 0.760716\n1037 1 0.371187 0.50808 0.999697\n1680 1 0.358277 0.562758 0.681386\n1711 1 0.4244 0.630437 0.688388\n1806 1 0.441943 0.567119 0.750873\n1837 1 0.372586 0.627269 0.7472\n1839 1 0.425275 0.636898 0.805457\n1808 1 0.367425 0.576069 0.811697\n1841 1 0.480895 0.633662 0.750275\n1683 1 0.546489 0.506003 0.7083\n775 1 0.20266 0.998955 
0.820421\n1715 1 0.554867 0.637874 0.675661\n1812 1 0.497689 0.576028 0.810013\n1684 1 0.506732 0.569881 0.67761\n1688 1 0.611941 0.569973 0.678944\n1810 1 0.552785 0.56423 0.753747\n1816 1 0.628294 0.568463 0.803914\n1843 1 0.552912 0.635985 0.805428\n1845 1 0.619726 0.630941 0.740154\n1105 1 0.503725 0.754895 0.985465\n1034 1 0.315945 0.556551 0.998275\n2015 1 0.929768 0.753955 0.934815\n1692 1 0.738943 0.56129 0.689843\n1719 1 0.679963 0.628995 0.677634\n1814 1 0.67673 0.55928 0.737495\n1820 1 0.740003 0.564543 0.792425\n1849 1 0.746811 0.623294 0.737583\n1851 1 0.821873 0.641883 0.807069\n1818 1 0.813118 0.570486 0.758593\n1723 1 0.808573 0.629856 0.679278\n1847 1 0.679106 0.630924 0.796491\n1622 1 0.694872 0.816913 0.501387\n2026 1 0.321069 0.933684 0.874354\n1668 1 0.00161641 0.550264 0.691927\n1696 1 0.867747 0.568443 0.692261\n1727 1 0.936716 0.620347 0.692061\n1822 1 0.937906 0.554054 0.754391\n1824 1 0.878493 0.563723 0.806043\n1853 1 0.870993 0.620269 0.751233\n1855 1 0.932516 0.629236 0.798814\n1921 1 0.985802 0.508446 0.869208\n2032 1 0.381012 0.936744 0.937401\n1826 1 0.0807973 0.68209 0.748804\n1704 1 0.12443 0.677993 0.680486\n1731 1 0.070339 0.755927 0.681903\n1736 1 0.131592 0.818875 0.688611\n1859 1 0.0641793 0.75404 0.815074\n1861 1 0.111521 0.760471 0.744401\n1864 1 0.129457 0.818756 0.807558\n1832 1 0.128859 0.691221 0.807491\n1858 1 0.0581076 0.822646 0.759003\n1732 1 0.0156928 0.815219 0.684325\n1700 1 0.0123007 0.702903 0.701072\n1860 1 0.00329759 0.827669 0.812835\n1834 1 0.323 0.688258 0.742466\n1739 1 0.302031 0.746807 0.680102\n1830 1 0.184109 0.692155 0.747925\n1740 1 0.242392 0.811399 0.670195\n1708 1 0.247609 0.682811 0.679502\n1735 1 0.181548 0.753772 0.686433\n1836 1 0.265717 0.698965 0.810633\n1865 1 0.247954 0.7437 0.741667\n1868 1 0.24032 0.820014 0.808097\n1863 1 0.194217 0.752434 0.821037\n1867 1 0.314991 0.766759 0.822679\n1866 1 0.295705 0.804461 0.740179\n1862 1 0.189481 0.813175 0.74149\n1840 1 0.371803 0.707571 0.803478\n1744 1 0.356219 0.809092 0.683614\n1712 1 0.371809 0.690161 0.677282\n1743 1 0.420897 0.752948 0.684708\n1838 1 0.43067 0.69662 0.749595\n1869 1 0.367827 0.761744 0.748145\n1870 1 0.444586 0.812807 0.741914\n1871 1 0.438308 0.760911 0.806357\n1872 1 0.374519 0.82 0.812243\n1844 1 0.49573 0.686809 0.81329\n1873 1 0.494444 0.744388 0.74629\n1716 1 0.485372 0.683785 0.68533\n1748 1 0.493662 0.812513 0.67854\n1876 1 0.500661 0.81425 0.816415\n1720 1 0.622297 0.681038 0.674616\n1842 1 0.55194 0.687401 0.738406\n1747 1 0.55161 0.757404 0.670048\n1752 1 0.621861 0.810448 0.685259\n1877 1 0.617308 0.749257 0.730547\n1848 1 0.612785 0.690826 0.799551\n1880 1 0.625012 0.804769 0.797951\n1875 1 0.551878 0.744538 0.808005\n1874 1 0.553533 0.805319 0.740525\n1751 1 0.674198 0.753033 0.677584\n1878 1 0.684679 0.818089 0.740569\n1724 1 0.75231 0.679197 0.676579\n1846 1 0.675428 0.689974 0.738277\n1850 1 0.822801 0.687768 0.734407\n1852 1 0.744784 0.681451 0.793088\n1881 1 0.754105 0.738564 0.742288\n1882 1 0.808592 0.811774 0.756821\n1884 1 0.739069 0.812507 0.81132\n1879 1 0.687611 0.744017 0.799345\n1755 1 0.805647 0.757348 0.686294\n1883 1 0.806278 0.746975 0.807062\n1756 1 0.741653 0.801849 0.681033\n1828 1 0.990302 0.700004 0.808206\n1857 1 0.00445425 0.763245 0.753641\n1728 1 0.877122 0.695511 0.674022\n1759 1 0.939167 0.764761 0.700511\n1854 1 0.935599 0.689576 0.742005\n1856 1 0.883202 0.696759 0.797015\n1885 1 0.870273 0.760598 0.756718\n1887 1 0.941038 0.765099 0.80757\n1886 1 0.942224 0.833795 0.746646\n1760 1 0.873044 0.819252 
0.695674\n1888 1 0.873734 0.824906 0.808311\n1763 1 0.0669978 0.89104 0.680456\n1768 1 0.13516 0.948558 0.683183\n1890 1 0.0719176 0.945566 0.760274\n1891 1 0.0738221 0.888538 0.81695\n1893 1 0.130007 0.880443 0.746897\n1896 1 0.138871 0.942428 0.809177\n1 1 -0.000345354 1.00054 0.985762\n1889 1 0.991597 0.890052 0.76296\n1892 1 0.991231 0.954422 0.817775\n1081 1 0.752757 0.618298 0.985083\n1561 1 0.726648 0.499815 0.508004\n1767 1 0.191559 0.87958 0.686637\n1772 1 0.251286 0.937505 0.686555\n1895 1 0.187436 0.884019 0.827287\n1894 1 0.203743 0.943216 0.759878\n1897 1 0.237257 0.875193 0.748985\n1898 1 0.310414 0.928991 0.740973\n1900 1 0.260555 0.936575 0.816079\n1771 1 0.301876 0.876917 0.685035\n1899 1 0.311587 0.870488 0.806565\n1693 1 0.876449 0.502379 0.643031\n911 1 0.445789 0.990647 0.932933\n1819 1 0.805275 0.502167 0.816109\n1775 1 0.439457 0.881571 0.684573\n1776 1 0.376379 0.940809 0.674989\n1901 1 0.373089 0.869296 0.747204\n1902 1 0.430301 0.929905 0.75138\n1903 1 0.437285 0.871019 0.81659\n1904 1 0.36072 0.934372 0.80388\n901 1 0.139686 0.997202 0.876433\n1908 1 0.504662 0.933637 0.819032\n1905 1 0.500396 0.872981 0.748976\n1935 1 0.433193 0.520077 0.943852\n1780 1 0.500135 0.936322 0.698187\n1779 1 0.563316 0.87341 0.686774\n1784 1 0.611428 0.947133 0.683542\n1906 1 0.560208 0.934531 0.746375\n1907 1 0.558455 0.883478 0.816587\n1909 1 0.61905 0.866614 0.744557\n1912 1 0.622407 0.938304 0.803008\n2013 1 0.866956 0.760866 0.871368\n1783 1 0.671506 0.879808 0.676346\n1787 1 0.807913 0.875665 0.690035\n1911 1 0.688684 0.884837 0.81583\n1788 1 0.747427 0.941708 0.685399\n1910 1 0.684739 0.928874 0.732162\n1913 1 0.740244 0.870822 0.759029\n1914 1 0.813713 0.944882 0.747207\n1916 1 0.757257 0.941182 0.800066\n1915 1 0.81269 0.887004 0.817763\n2041 1 0.741601 0.874401 0.877129\n2016 1 0.865348 0.817349 0.937515\n2031 1 0.440344 0.868326 0.94242\n1569 1 0.977567 0.624767 0.500665\n1764 1 0.00324025 0.944907 0.702638\n1792 1 0.875862 0.942925 0.683502\n1920 1 0.869625 0.946275 0.820721\n1791 1 0.942606 0.886619 0.688496\n1917 1 0.867943 0.889593 0.75236\n1918 1 0.933364 0.961517 0.759499\n1919 1 0.935364 0.875109 0.817042\n1809 1 0.491035 0.505618 0.770446\n1922 1 0.0790721 0.545239 0.865939\n1928 1 0.116821 0.577712 0.929277\n1955 1 0.0637864 0.641881 0.935774\n1957 1 0.129389 0.638529 0.883045\n2017 1 1.00055 0.872175 0.88189\n2036 1 0.498694 0.929636 0.940222\n1926 1 0.182419 0.564945 0.883415\n1930 1 0.309444 0.574807 0.878552\n1963 1 0.3071 0.638292 0.93652\n1961 1 0.253971 0.639924 0.88063\n1932 1 0.245202 0.557673 0.937703\n1959 1 0.195667 0.630349 0.935184\n2019 1 0.0682419 0.882473 0.947645\n1982 1 0.93191 0.710264 0.86941\n2029 1 0.379551 0.88448 0.873863\n2038 1 0.708696 0.945125 0.889332\n2024 1 0.12496 0.937131 0.937305\n1967 1 0.432948 0.636867 0.9414\n1965 1 0.370028 0.636361 0.87195\n1934 1 0.428661 0.573256 0.872295\n1936 1 0.370598 0.58749 0.937681\n2043 1 0.811456 0.866277 0.942757\n1985 1 0.0123405 0.763055 0.878259\n1582 1 0.439708 0.701636 0.505918\n2039 1 0.679121 0.871829 0.94815\n1940 1 0.488593 0.57206 0.92654\n1969 1 0.494148 0.6276 0.872205\n1938 1 0.56884 0.574775 0.865186\n1944 1 0.621435 0.565212 0.928118\n1971 1 0.564237 0.631207 0.920439\n1973 1 0.62985 0.64269 0.858756\n1562 1 0.815932 0.561768 0.51757\n2042 1 0.801806 0.940674 0.880897\n2035 1 0.564937 0.868679 0.937368\n1975 1 0.69673 0.631164 0.922824\n1942 1 0.680087 0.570382 0.868936\n1946 1 0.82499 0.582323 0.870036\n1977 1 0.758608 0.615841 0.849923\n1948 1 0.759872 0.565802 0.923609\n1979 1 
0.827124 0.626626 0.943473\n2044 1 0.764585 0.930941 0.943411\n2018 1 0.049107 0.938726 0.875643\n1026 1 0.0626372 0.576169 0.99856\n2028 1 0.265951 0.935618 0.938025\n2030 1 0.442121 0.933748 0.868458\n1661 1 0.873983 0.864455 0.521039\n1953 1 0.999242 0.63398 0.862671\n1924 1 0.02005 0.579924 0.912463\n1950 1 0.940409 0.577627 0.866312\n1952 1 0.876156 0.568137 0.937317\n1983 1 0.932122 0.630111 0.939589\n1981 1 0.884409 0.640752 0.863058\n2020 1 0.999303 0.939469 0.937781\n2023 1 0.190205 0.857188 0.934394\n1992 1 0.119834 0.818289 0.94048\n1987 1 0.0650098 0.729369 0.928843\n1960 1 0.13573 0.692807 0.93785\n1989 1 0.12192 0.744342 0.877863\n1986 1 0.066467 0.818692 0.872371\n1954 1 0.0559901 0.688495 0.863822\n1956 1 0.992859 0.6971 0.925681\n1988 1 0.00178636 0.824759 0.947454\n2037 1 0.624427 0.86563 0.853467\n777 1 0.260067 1.00014 0.755069\n2025 1 0.250939 0.875095 0.866972\n1570 1 0.0520063 0.674491 0.501167\n527 1 0.441692 0.997477 0.562529\n1995 1 0.320197 0.764859 0.933165\n1962 1 0.313569 0.698813 0.882665\n1990 1 0.176067 0.810609 0.869406\n1958 1 0.206869 0.699756 0.87587\n1993 1 0.255758 0.761332 0.870087\n1964 1 0.252452 0.694758 0.94587\n1996 1 0.261869 0.813717 0.934664\n1991 1 0.192274 0.759845 0.930855\n1994 1 0.321282 0.82215 0.871782\n2022 1 0.202296 0.941567 0.888517\n913 1 0.509303 0.991335 0.87085\n1042 1 0.549569 0.566349 0.981552\n1984 1 0.869021 0.693825 0.941216\n1968 1 0.365199 0.69541 0.949182\n1966 1 0.417276 0.695439 0.87307\n2000 1 0.38096 0.819375 0.94223\n1997 1 0.374362 0.756851 0.87359\n1998 1 0.442971 0.80958 0.882953\n1999 1 0.434817 0.749915 0.947828\n2004 1 0.506351 0.813986 0.931828\n1972 1 0.498017 0.683914 0.932271\n2001 1 0.491942 0.748281 0.87934\n2014 1 0.940761 0.81614 0.882824\n2034 1 0.574135 0.929194 0.880377\n1970 1 0.564546 0.689732 0.867841\n1976 1 0.627102 0.68574 0.933978\n2003 1 0.559997 0.761141 0.931595\n2008 1 0.623244 0.813435 0.928187\n2005 1 0.640758 0.737378 0.871889\n2002 1 0.566999 0.808699 0.868313\n1679 1 0.424506 0.501989 0.693354\n2027 1 0.324966 0.872924 0.940011\n1538 1 0.0535105 0.548949 0.509707\n2009 1 0.756649 0.750367 0.870096\n2006 1 0.696111 0.80216 0.885883\n2011 1 0.808869 0.764741 0.937588\n2010 1 0.811126 0.821923 0.865416\n1974 1 0.706618 0.683419 0.862927\n1978 1 0.813892 0.695866 0.886152\n2007 1 0.689794 0.744755 0.94433\n2012 1 0.74312 0.815516 0.948319\n1980 1 0.755776 0.698414 0.94192\n2021 1 0.121848 0.877079 0.877819\n1559 1 0.666917 0.503741 0.56892\n905 1 0.257391 0.997637 0.876499\n1923 1 0.070895 0.512034 0.939968\n1943 1 0.679428 0.508224 0.932538\n779 1 0.310697 0.998645 0.808374\n771 1 0.0814049 0.997028 0.819121\n903 1 0.197266 0.996449 0.940246\n1574 1 0.188148 0.687326 0.50333\n1141 1 0.619536 0.867238 0.989806\n1949 1 0.877631 0.520167 0.87078\n1937 1 0.489419 0.508329 0.867968\n1122 1 0.0655199 0.94057 1.00274\n17 1 0.487396 0.997212 0.992121\n1630 1 0.957182 0.813431 0.508595\n1566 1 0.935028 0.548432 0.509281\n1045 1 0.620439 0.513331 0.984104\n1074 1 0.54883 0.693369 0.996351\n1134 1 0.4448 0.934961 0.994347\n1097 1 0.263014 0.759057 0.996555\n1109 1 0.614229 0.739445 0.990976\n521 1 0.237527 0.998982 0.504056\n1606 1 0.175535 0.825284 0.505947\n1073 1 0.496369 0.621679 0.997938\n1609 1 0.238777 0.741904 0.503833\n1149 1 0.876436 0.870862 0.995722\n1082 1 0.819471 0.686564 0.996925\n1593 1 0.738269 0.615687 0.512377\n1130 1 0.315631 0.938343 0.996257\n1654 1 0.690808 0.94745 0.503011\n"
] | true |
99,533 |
10da6b06bd51d413b4937670e9141053f12bdc30
|
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfdevice import PDFDevice
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, PDFTextState, PDFGraphicState
from pdfminer.pdftypes import list_value, dict_value, stream_value, PDFStream
from pdfminer.psparser import LIT, PSLiteral
from pdfminer.pdftypes import PDFObjRef, resolve1
from pdfminer.utils import mult_matrix
from pdftext import TextAnalyzer, textSpanBox
import pdffonts
import colorspace
def literal(name):
    return LIT(name) if not isinstance(name, PSLiteral) else name
def render_type(ftype):
def render_function(func):
def render_arguments(self, *args, **kwargs):
if ftype in self.filtered:
return
return func(self, *args, **kwargs)
return render_arguments
return render_function
def get_default(res_type):
def binding(func):
def get_arguments(self, objid, obj=None):
            res_list = getattr(self, res_type + 's', None)
if res_list is None:
return
if objid is not None:
objid = literal(objid)
if objid in res_list:
return res_list[objid]
elif obj is None:
return
func(self, objid, obj=obj)
if objid is not None:
return res_list.get(objid)
return get_arguments
return binding
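
# render_type('text'|'image'|'path') turns a Device method into a no-op
# whenever that type appears in self.filtered; get_default('font'|
# 'colorspace'|'xobject') wraps a lookup so per-id results are cached in
# the matching self.<res_type>s dict and only re-parsed on a cache miss.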
class Paint:
def __init__(self, cs, value):
self.cs = cs
self.value = value
def draw(self):
return self.cs.getRGB(*self.value)
class TextState(PDFTextState):
def __init__(self):
super().__init__()
self.fill = None
self.extState = {}
def copy(self):
obj = self.__class__()
obj.font = self.font
obj.fontsize = self.fontsize
obj.charspace = self.charspace
obj.wordspace = self.wordspace
obj.scaling = self.scaling
obj.leading = self.leading
obj.render = self.render
obj.rise = self.rise
obj.matrix = self.matrix
obj.linematrix = self.linematrix
obj.fill = self.fill
obj.extState = self.extState
return obj
def __setattr__(self, key, value):
if key in ['charspace', 'wordspace']:
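            # horizontal scaling (Tz) is folded in at assignment time:
            # e.g. scaling=50 halves any charspace/wordspace value set here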
value *= getattr(self, 'scaling', 100) * 0.01
return object.__setattr__(self, key, value)
class GraphicState(PDFGraphicState):
def __init__(self):
super().__init__()
self.stroke = self.fill = None
self.extState = {}
def copy(self):
obj = self.__class__()
obj.linewidth = self.linewidth
obj.linecap = self.linecap
obj.linejoin = self.linejoin
obj.miterlimit = self.miterlimit
obj.dash = self.dash
obj.intent = self.intent
obj.flatness = self.flatness
obj.stroke = self.stroke
obj.fill = self.fill
obj.extState = self.extState
return obj
class Device(PDFDevice):
def __init__(self, filtered=None, laparams=None, check_visible=True):
super().__init__(None)
self.filtered = filtered or []
self.check_visible = check_visible
self.analyzer = TextAnalyzer(**(laparams or {}))
self.pageno = 1
self.reset()
self.viewBox = [0, 0, 0, 0]
def reset(self):
self.images = {}
self.text_layer = []
self.layers = {}
self.layer_stack = []
def begin_page(self, page, ctm):
self.reset()
self.layers[LIT('Page')] = (page.cropbox, ctm)
self.layer_stack = [LIT('Page')]
self.viewBox = page.cropbox
self.ymax = page.mediabox[3] - page.mediabox[1]
def is_visible(self, span, bbox):
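        # A span counts as visible only if its corners remain distinct after
        # rounding to integers (duplicates mean a collapsed, zero-area box)
        # and every corner lies strictly inside the clipping bbox.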
boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))
if len(boxset) < len(span.bbox):
return False
xmin, ymin, xmax, ymax = bbox
return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)
def get_current_layer(self):
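        # Walk the layer stack from the top, skipping balanced
        # begin_figure/'end' pairs, to find the innermost still-open layer.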
i = -1
depth = 0
while True:
layerName = self.layer_stack[i]
if layerName == 'end':
depth += 1
else:
depth -= 1
if depth < 0:
break
i -= 1
return layerName, self.layers[layerName]
def end_page(self, page):
        self.text_layer = filter(
            lambda x: not self.check_visible or self.is_visible(x, self.viewBox),
            self.text_layer)
lines = self.analyzer.group_lines(self.text_layer)
paras = self.analyzer.group_paras(lines)
self.text_layer = paras
self.pageno += 1
def begin_figure(self, name, bbox, matrix):
x, y, w, h = bbox
self.layers[name] = ([x, y, x+w, y+h], matrix)
self.layer_stack.append(name)
def end_figure(self, name):
self.layer_stack.append('end')
@render_type('path')
def paint_path(self, graphicstate, stroke, fill, evenodd, path):
# path handling suspended
return path
@render_type('image')
def render_image(self, name, stream, anchored=False, textstate=None):
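        # layers[name] holds a (bbox, matrix) pair; get_current_layer()
        # returns (layerName, (bbox, matrix)), so [1] selects that pair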
bbox, matrix = self.get_current_layer()[1]
self.images.setdefault(stream.objid, (name, stream, bbox, matrix))
@render_type('text')
def render_string(self, textstate, seq, *args):
layerName = self.get_current_layer()[0]
x, y = textstate.linematrix
a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)
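        # flip the y origin: PDF device space is bottom-up, the text layout
        # here is top-down, so f is mirrored about the page height (ymax)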
matrix = a, b, c, d, e, self.ymax - f
box = textSpanBox((x, y), seq, textstate, layerName=layerName, matrix=matrix)
        # Visibility check, approximating PDF overprint semantics: without
        # overprint (OP) or with overprint mode 0 the glyphs always mark the
        # page; with OP and a nonzero OPM an all-zero fill knocks the text
        # out, so only spans with some nonzero fill component are kept.
        if not textstate.extState.get('OP', False) or not textstate.extState.get('OPM', 0):
            self.text_layer.append(box)
        elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):
            self.text_layer.append(box)
textstate.linematrix = box.originbox[2]
class ResourceManager(PDFResourceManager):
def __init__(self):
self.fonts = {}
self.colorspaces = colorspace.defaults.copy()
self.xobjects = {}
self.cache = {}
self.stream_objects = []
    def clear(self):
        # iterate over the font objects (not the dict keys) so that any
        # embedded font stream each font holds open gets closed
        for res in self.fonts.values():
            stream_to_close = getattr(res, 'embedFont', None)
            stream_to_close and stream_to_close.close()
self.fonts.clear()
self.colorspaces.clear()
self.xobjects.clear()
def render_resource(self, res_type, res_obj):
get_function = getattr(self, 'get_' + res_type.lower(), None)
return get_function and get_function(None, obj=res_obj)
@get_default('font')
def get_font(self, objid, obj=None):
for (fontid, spec) in dict_value(obj).items():
spec = dict_value(spec)
spec, fontType, embedFont, opentype = pdffonts.getType(spec)
if fontType:
font = fontType(spec, embedFont=embedFont and self.xobjects.get(
embedFont.objid, embedFont), opentype=opentype)
if embedFont:
objid = literal(embedFont.objid)
                    if objid not in self.xobjects:
self.xobjects[objid] = font.embedFont
self.fonts[literal(fontid)] = font
@get_default('colorspace')
def get_colorspace(self, objid, obj=None):
for (csid, spec) in dict_value(obj).items():
cs = colorspace.parse(spec)
if cs:
self.colorspaces[literal(csid)] = cs
def get_procset(self, objid, obj=None):
# procset handling suspended
pass
@get_default('xobject')
def get_xobject(self, objid, obj=None):
for (xobjid, xobjstrm) in dict_value(obj).items():
self.xobjects[literal(xobjid)] = xobjstrm
class Interpreter(PDFPageInterpreter):
def __init__(self, device):
self.rsrcmgr = ResourceManager()
self.device = device
# custom logging here
def log(self, message):
pass
def dup(self):
return self.__class__(self.device)
def close(self):
self.rsrcmgr.clear()
def init_resources(self, resources):
self.resources = resources
if resources:
for (k, v) in dict_value(resources).items():
self.debug and self.log('Resource: %r: %r' % (k, v))
self.rsrcmgr.render_resource(k, v)
def init_state(self, ctm):
self.gstack = []
self.ctm = ctm
self.device.set_ctm(self.ctm)
self.textstate = TextState()
self.graphicstate = GraphicState()
self.curpath = []
self.argstack = []
self.scs = self.ncs = colorspace.CMYKColorSpace()
def do_CS(self, name):
self.scs = self.rsrcmgr.get_colorspace(literal(name))
def do_cs(self, name):
self.ncs = self.rsrcmgr.get_colorspace(literal(name))
def do_SCN(self):
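        # SCN takes one operand per component of the current stroking
        # colorspace (e.g. 4 for CMYK); pop exactly that many off the stack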
n = len(self.scs.mode)
pattern = self.argstack[-n:]
self.graphicstate.stroke = Paint(self.scs, pattern)
self.argstack = self.argstack[:-n]
def do_scn(self):
n = len(self.ncs.mode)
pattern = self.argstack[-n:]
self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)
self.argstack = self.argstack[:-n]
def do_G(self, gray):
cs = colorspace.GrayColorSpace()
self.graphicstate.stroke = Paint(cs, gray)
def do_g(self, gray):
cs = colorspace.GrayColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)
def do_RG(self, r, g, b):
cs = colorspace.RGBColorSpace()
self.graphicstate.stroke = Paint(cs, (r, g, b))
def do_rg(self, r, g, b):
cs = colorspace.RGBColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))
def do_K(self, c, m, y, k):
cs = colorspace.CMYKColorSpace()
self.graphicstate.stroke = Paint(cs, (c, m, y, k))
def do_k(self, c, m, y, k):
cs = colorspace.CMYKColorSpace()
self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))
def do_Tf(self, fontid, fontsize):
self.textstate.font = self.rsrcmgr.get_font(literal(fontid))
self.textstate.fontsize = fontsize
def do_Do(self, xobjid):
xobj = self.rsrcmgr.get_xobject(literal(xobjid))
if not xobj:
return
self.debug and self.log('Processing xobj: %r' % xobj)
xobj = stream_value(xobj)
subtype = xobj.get('Subtype')
if subtype is LIT('Form') and 'BBox' in xobj:
interpreter = self.dup()
bbox = list_value(xobj['BBox'])
matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))
# According to PDF reference 1.7 section 4.9.1, XObjects in
# earlier PDFs (prior to v1.2) use the page's Resources entry
# instead of having their own Resources entry.
resources = dict_value(xobj.get('Resources')
) or self.resources.copy()
self.device.begin_figure(xobjid, bbox, matrix)
interpreter.render_contents(
resources, [xobj], ctm=mult_matrix(matrix, self.ctm))
self.device.end_figure(xobjid)
elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:
self.device.render_image(xobjid, xobj, anchored=True)
else:
# unsupported xobject type.
pass
    def do_EI(self, obj):
        if 'W' in obj and 'H' in obj:
            # render_image's keyword is 'textstate'; passing 'state' here
            # would raise a TypeError
            self.device.render_image(
                str(id(obj)), obj, anchored=False, textstate=self.textstate)
def do_gs(self, name):
if isinstance(name, PSLiteral):
name = name.name
        # guard: pages whose resources lack an ExtGState dict would KeyError
        gstate = self.resources.get('ExtGState', {}).get(name)
if gstate and not self.textstate.extState:
gstate = resolve1(gstate)
self.textstate.extState = gstate
def do_q(self):
self.gstack.append(self.get_current_state())
def do_Q(self):
self.gstack and self.set_current_state(self.gstack.pop())
# def do_Td(self, tx, ty):
# x, y = self.textstate.linematrix
# # print((x,y), (tx,ty))
# (a, b, c, d, e, f) = self.textstate.matrix
# print((x,y), (tx,ty), (tx*a+ty*c+e, tx*b+ty*d+f))
# self.textstate.matrix = (a, b, c, d, tx*a+ty*c+e, tx*b+ty*d+f)
# self.textstate.linematrix = (0, 0)
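

# --- Minimal usage sketch (illustrative; not part of the original module) ---
# Shows how Device and Interpreter would presumably be wired to pdfminer's
# parser classes, which are imported above but otherwise unused in this file.
# The input path is a placeholder assumption.
if __name__ == '__main__':
    import sys
    path = sys.argv[1] if len(sys.argv) > 1 else 'sample.pdf'
    device = Device(laparams={}, check_visible=True)
    interpreter = Interpreter(device)
    with open(path, 'rb') as fp:
        parser = PDFParser(fp)
        document = PDFDocument(parser)
        for page in PDFPage.create_pages(document):
            # process_page (inherited from PDFPageInterpreter) drives
            # device.begin_page / render_contents / device.end_page
            interpreter.process_page(page)
            for para in device.text_layer:
                print(para)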
|
[
"\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.pdfdevice import PDFDevice\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, PDFTextState, PDFGraphicState\nfrom pdfminer.pdftypes import list_value, dict_value, stream_value, PDFStream\nfrom pdfminer.psparser import LIT, PSLiteral\nfrom pdfminer.pdftypes import PDFObjRef, resolve1\nfrom pdfminer.utils import mult_matrix\n\nfrom pdftext import TextAnalyzer, textSpanBox\nimport pdffonts\nimport colorspace\n\n\ndef literal(name): return LIT(\n name) if not isinstance(name, PSLiteral) else name\n\n\ndef render_type(ftype):\n def render_function(func):\n def render_arguments(self, *args, **kwargs):\n if ftype in self.filtered:\n return\n return func(self, *args, **kwargs)\n return render_arguments\n return render_function\n\n\ndef get_default(res_type):\n def binding(func):\n def get_arguments(self, objid, obj=None):\n res_list = getattr(self, res_type+'s', None)\n if res_list is None:\n return\n if objid is not None:\n objid = literal(objid)\n if objid in res_list:\n return res_list[objid]\n elif obj is None:\n return\n func(self, objid, obj=obj)\n if objid is not None:\n return res_list.get(objid)\n return get_arguments\n return binding\n\n\nclass Paint:\n def __init__(self, cs, value):\n self.cs = cs\n self.value = value\n\n def draw(self):\n return self.cs.getRGB(*self.value)\n\n\nclass TextState(PDFTextState):\n def __init__(self):\n super().__init__()\n self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.font = self.font\n obj.fontsize = self.fontsize\n obj.charspace = self.charspace\n obj.wordspace = self.wordspace\n obj.scaling = self.scaling\n obj.leading = self.leading\n obj.render = self.render\n obj.rise = self.rise\n obj.matrix = self.matrix\n obj.linematrix = self.linematrix\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n def __setattr__(self, key, value):\n if key in ['charspace', 'wordspace']:\n value *= getattr(self, 'scaling', 100) * 0.01\n return object.__setattr__(self, key, value)\n\n\nclass GraphicState(PDFGraphicState):\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**(laparams or {}))\n\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = (page.cropbox, ctm)\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def 
get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible\n or self.is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = ([x, y, x+w, y+h], matrix)\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n # path handling suspended\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName, matrix=matrix)\n\n # check if text is visible\n if not textstate.extState.get('OP', False) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for (fontid, spec) in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects.get(\n embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for (csid, spec) in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n # procset handling suspended\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for (xobjid, xobjstrm) in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n # custom logging here\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, 
resources):\n self.resources = resources\n if resources:\n for (k, v) in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n # According to PDF reference 1.7 section 4.9.1, XObjects in\n # earlier PDFs (prior to v1.2) use the page's Resources entry\n # instead of having their own Resources entry.\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(\n resources, [xobj], ctm=mult_matrix(matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n # unsupported xobject type.\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(\n str(id(obj)), obj, anchored=False, state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n\n # def do_Td(self, tx, ty):\n # \tx, y = self.textstate.linematrix\n # \t# print((x,y), (tx,ty))\n # \t(a, b, c, d, e, f) = self.textstate.matrix\n # \tprint((x,y), (tx,ty), 
(tx*a+ty*c+e, tx*b+ty*d+f))\n # \tself.textstate.matrix = (a, b, c, d, tx*a+ty*c+e, tx*b+ty*d+f)\n # \tself.textstate.linematrix = (0, 0)\n\n",
"from pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.pdfdevice import PDFDevice\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, PDFTextState, PDFGraphicState\nfrom pdfminer.pdftypes import list_value, dict_value, stream_value, PDFStream\nfrom pdfminer.psparser import LIT, PSLiteral\nfrom pdfminer.pdftypes import PDFObjRef, resolve1\nfrom pdfminer.utils import mult_matrix\nfrom pdftext import TextAnalyzer, textSpanBox\nimport pdffonts\nimport colorspace\n\n\ndef literal(name):\n return LIT(name) if not isinstance(name, PSLiteral) else name\n\n\ndef render_type(ftype):\n\n def render_function(func):\n\n def render_arguments(self, *args, **kwargs):\n if ftype in self.filtered:\n return\n return func(self, *args, **kwargs)\n return render_arguments\n return render_function\n\n\ndef get_default(res_type):\n\n def binding(func):\n\n def get_arguments(self, objid, obj=None):\n res_list = getattr(self, res_type + 's', None)\n if res_list is None:\n return\n if objid is not None:\n objid = literal(objid)\n if objid in res_list:\n return res_list[objid]\n elif obj is None:\n return\n func(self, objid, obj=obj)\n if objid is not None:\n return res_list.get(objid)\n return get_arguments\n return binding\n\n\nclass Paint:\n\n def __init__(self, cs, value):\n self.cs = cs\n self.value = value\n\n def draw(self):\n return self.cs.getRGB(*self.value)\n\n\nclass TextState(PDFTextState):\n\n def __init__(self):\n super().__init__()\n self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.font = self.font\n obj.fontsize = self.fontsize\n obj.charspace = self.charspace\n obj.wordspace = self.wordspace\n obj.scaling = self.scaling\n obj.leading = self.leading\n obj.render = self.render\n obj.rise = self.rise\n obj.matrix = self.matrix\n obj.linematrix = self.linematrix\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n def __setattr__(self, key, value):\n if key in ['charspace', 'wordspace']:\n value *= getattr(self, 'scaling', 100) * 0.01\n return object.__setattr__(self, key, value)\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def 
get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n 
self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n\n\ndef literal(name):\n return LIT(name) if not isinstance(name, PSLiteral) else name\n\n\ndef render_type(ftype):\n\n def render_function(func):\n\n def render_arguments(self, *args, **kwargs):\n if ftype in self.filtered:\n return\n return func(self, *args, **kwargs)\n return render_arguments\n return render_function\n\n\ndef get_default(res_type):\n\n def binding(func):\n\n def get_arguments(self, objid, obj=None):\n res_list = getattr(self, res_type + 's', None)\n if res_list is None:\n return\n if objid is not None:\n objid = literal(objid)\n if objid in res_list:\n return res_list[objid]\n elif obj is None:\n return\n func(self, objid, obj=obj)\n if objid is not None:\n return res_list.get(objid)\n return get_arguments\n return binding\n\n\nclass Paint:\n\n def __init__(self, cs, value):\n self.cs = cs\n self.value = value\n\n def draw(self):\n return self.cs.getRGB(*self.value)\n\n\nclass TextState(PDFTextState):\n\n def __init__(self):\n super().__init__()\n self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.font = self.font\n obj.fontsize = self.fontsize\n obj.charspace = self.charspace\n obj.wordspace = self.wordspace\n obj.scaling = self.scaling\n obj.leading = self.leading\n obj.render = self.render\n obj.rise = self.rise\n obj.matrix = self.matrix\n obj.linematrix = self.linematrix\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n def __setattr__(self, key, value):\n if key in ['charspace', 'wordspace']:\n value *= getattr(self, 'scaling', 100) * 0.01\n return object.__setattr__(self, key, value)\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, 
matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n 
pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n\n\ndef render_type(ftype):\n\n def render_function(func):\n\n def render_arguments(self, *args, **kwargs):\n if ftype in self.filtered:\n return\n return func(self, *args, **kwargs)\n return render_arguments\n return render_function\n\n\ndef get_default(res_type):\n\n def binding(func):\n\n def get_arguments(self, objid, obj=None):\n res_list = getattr(self, res_type + 's', None)\n if res_list is None:\n return\n if objid is not None:\n objid = literal(objid)\n if objid in res_list:\n return res_list[objid]\n elif obj is None:\n return\n func(self, objid, obj=obj)\n if objid is not None:\n return res_list.get(objid)\n return get_arguments\n return binding\n\n\nclass Paint:\n\n def __init__(self, cs, value):\n self.cs = cs\n self.value = value\n\n def draw(self):\n return self.cs.getRGB(*self.value)\n\n\nclass TextState(PDFTextState):\n\n def __init__(self):\n super().__init__()\n self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.font = self.font\n obj.fontsize = self.fontsize\n obj.charspace = self.charspace\n obj.wordspace = self.wordspace\n obj.scaling = self.scaling\n obj.leading = self.leading\n obj.render = self.render\n obj.rise = self.rise\n obj.matrix = self.matrix\n obj.linematrix = self.linematrix\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n def __setattr__(self, key, value):\n if key in ['charspace', 'wordspace']:\n value *= getattr(self, 'scaling', 100) * 0.01\n return object.__setattr__(self, key, value)\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], 
matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, 
pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n\n\ndef get_default(res_type):\n\n def binding(func):\n\n def get_arguments(self, objid, obj=None):\n res_list = getattr(self, res_type + 's', None)\n if res_list is None:\n return\n if objid is not None:\n objid = literal(objid)\n if objid in res_list:\n return res_list[objid]\n elif obj is None:\n return\n func(self, objid, obj=obj)\n if objid is not None:\n return res_list.get(objid)\n return get_arguments\n return binding\n\n\nclass Paint:\n\n def __init__(self, cs, value):\n self.cs = cs\n self.value = value\n\n def draw(self):\n return self.cs.getRGB(*self.value)\n\n\nclass TextState(PDFTextState):\n\n def __init__(self):\n super().__init__()\n self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.font = self.font\n obj.fontsize = self.fontsize\n obj.charspace = self.charspace\n obj.wordspace = self.wordspace\n obj.scaling = self.scaling\n obj.leading = self.leading\n obj.render = self.render\n obj.rise = self.rise\n obj.matrix = self.matrix\n obj.linematrix = self.linematrix\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n def __setattr__(self, key, value):\n if key in ['charspace', 'wordspace']:\n value *= getattr(self, 'scaling', 100) * 0.01\n return object.__setattr__(self, key, value)\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n 
@render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack 
= self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
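The get_default decorator gives every get_* accessor the same cache-first shape: answer from the backing store when the id is already known, otherwise run the parser and re-read the store. A standalone sketch of that pattern follows, with the literal() key resolution omitted for brevity; cached_resource, Demo, and parse markers are illustrative names only.

# Standalone sketch of the cache-first resource lookup used by get_default.
def cached_resource(store_name):
    def binding(func):
        def wrapper(self, key, obj=None):
            store = getattr(self, store_name)
            if key is not None and key in store:
                return store[key]          # cache hit: skip parsing
            if obj is None:
                return None                # nothing to parse from
            func(self, key, obj=obj)       # populate the store
            return store.get(key)
        return wrapper
    return binding

class Demo:
    def __init__(self):
        self.colorspaces = {}

    @cached_resource('colorspaces')
    def get_colorspace(self, key, obj=None):
        self.colorspaces[key] = ('parsed', obj)

d = Demo()
assert d.get_colorspace('cs0') is None                    # miss, no object
assert d.get_colorspace('cs0', obj=[1, 2]) == ('parsed', [1, 2])
assert d.get_colorspace('cs0') == ('parsed', [1, 2])      # now cached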
"<import token>\n<function token>\n<function token>\n<function token>\n\n\nclass Paint:\n\n def __init__(self, cs, value):\n self.cs = cs\n self.value = value\n\n def draw(self):\n return self.cs.getRGB(*self.value)\n\n\nclass TextState(PDFTextState):\n\n def __init__(self):\n super().__init__()\n self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.font = self.font\n obj.fontsize = self.fontsize\n obj.charspace = self.charspace\n obj.wordspace = self.wordspace\n obj.scaling = self.scaling\n obj.leading = self.leading\n obj.render = self.render\n obj.rise = self.rise\n obj.matrix = self.matrix\n obj.linematrix = self.linematrix\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n def __setattr__(self, key, value):\n if key in ['charspace', 'wordspace']:\n value *= getattr(self, 'scaling', 100) * 0.01\n return object.__setattr__(self, key, value)\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n 
matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = 
colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
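get_current_layer recovers the innermost open layer by scanning the stack from the right and letting each 'end' marker cancel one layer name to its left. A table-top sketch of the same scan, with current_layer as an illustrative standalone name:

# Sketch of the backwards scan in get_current_layer: every 'end' cancels
# one opener to its left, so the scan returns the innermost open layer.
def current_layer(stack):
    depth = 0
    for name in reversed(stack):
        if name == 'end':
            depth += 1      # one more closed figure to skip
        else:
            depth -= 1
            if depth < 0:   # first unmatched opener from the right
                return name

assert current_layer(['Page']) == 'Page'
assert current_layer(['Page', 'Fig1']) == 'Fig1'
assert current_layer(['Page', 'Fig1', 'end']) == 'Page'
assert current_layer(['Page', 'Fig1', 'Fig2', 'end']) == 'Fig1'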
"<import token>\n<function token>\n<function token>\n<function token>\n\n\nclass Paint:\n\n def __init__(self, cs, value):\n self.cs = cs\n self.value = value\n <function token>\n\n\nclass TextState(PDFTextState):\n\n def __init__(self):\n super().__init__()\n self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.font = self.font\n obj.fontsize = self.fontsize\n obj.charspace = self.charspace\n obj.wordspace = self.wordspace\n obj.scaling = self.scaling\n obj.leading = self.leading\n obj.render = self.render\n obj.rise = self.rise\n obj.matrix = self.matrix\n obj.linematrix = self.linematrix\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n def __setattr__(self, key, value):\n if key in ['charspace', 'wordspace']:\n value *= getattr(self, 'scaling', 100) * 0.01\n return object.__setattr__(self, key, value)\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - 
f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n 
self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
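render_string positions each span by composing the text matrix with the CTM, then flipping the f translation into top-down page coordinates via ymax. A numeric sketch, assuming pdfminer.utils.mult_matrix (the same helper the code above calls) and hypothetical US-Letter geometry:

# Numeric sketch of the matrix handling in render_string: compose the text
# matrix with the CTM, then flip f into top-left page coordinates via ymax.
from pdfminer.utils import mult_matrix

textmatrix = (1, 0, 0, 1, 72, 700)   # text space: 72pt from left, 700pt up
ctm        = (1, 0, 0, 1, 0, 0)      # identity page transform
ymax       = 792                     # US-Letter height in points

a, b, c, d, e, f = mult_matrix(textmatrix, ctm)
flipped = (a, b, c, d, e, ymax - f)  # same flip as render_string
print(flipped)                       # (1, 0, 0, 1, 72, 92): 92pt from top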
"<import token>\n<function token>\n<function token>\n<function token>\n\n\nclass Paint:\n <function token>\n <function token>\n\n\nclass TextState(PDFTextState):\n\n def __init__(self):\n super().__init__()\n self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.font = self.font\n obj.fontsize = self.fontsize\n obj.charspace = self.charspace\n obj.wordspace = self.wordspace\n obj.scaling = self.scaling\n obj.leading = self.leading\n obj.render = self.render\n obj.rise = self.rise\n obj.matrix = self.matrix\n obj.linematrix = self.linematrix\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n def __setattr__(self, key, value):\n if key in ['charspace', 'wordspace']:\n value *= getattr(self, 'scaling', 100) * 0.01\n return object.__setattr__(self, key, value)\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, 
layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, 
b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
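The branch closing render_string is an overprint gate: spans survive unless overprint (OP) is on with a nonzero overprint mode (OPM) and every fill component is zero. A truth-table sketch of that predicate, with keeps_span as an illustrative name:

# Sketch of the overprint gate at the end of render_string: a span is
# dropped only when overprint is on (OP), overprint mode is nonzero (OPM),
# and every fill component is zero (e.g. CMYK 0,0,0,0 knockout text).
def keeps_span(ext_state, fill_value):
    if not ext_state.get('OP', False) or not ext_state.get('OPM', 0):
        return True
    return bool(ext_state.get('OPM', 1)) and any(fill_value)

assert keeps_span({}, (0, 0, 0, 0))                          # OP off
assert keeps_span({'OP': True, 'OPM': 0}, (0, 0, 0, 0))      # mode 0
assert keeps_span({'OP': True, 'OPM': 1}, (0, 0, 1, 0))      # visible ink
assert not keeps_span({'OP': True, 'OPM': 1}, (0, 0, 0, 0))  # dropped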
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass TextState(PDFTextState):\n\n def __init__(self):\n super().__init__()\n self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.font = self.font\n obj.fontsize = self.fontsize\n obj.charspace = self.charspace\n obj.wordspace = self.wordspace\n obj.scaling = self.scaling\n obj.leading = self.leading\n obj.render = self.render\n obj.rise = self.rise\n obj.matrix = self.matrix\n obj.linematrix = self.linematrix\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n def __setattr__(self, key, value):\n if key in ['charspace', 'wordspace']:\n value *= getattr(self, 'scaling', 100) * 0.01\n return object.__setattr__(self, key, value)\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not 
textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = 
colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
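do_SCN and do_scn size their operand pop by the component count of the active colour space (len(cs.mode)), taking everything in one slice off the shared argument stack. A sketch with a hypothetical three-component, RGB-like space:

# Sketch of the operand handling in do_SCN: the number of values popped
# off the argument stack equals the component count of the current space
# (len(cs.mode), e.g. 3 for RGB, 4 for CMYK).
argstack = ['unrelated', 0.2, 0.4, 0.6]   # as left by preceding operators
n = 3                                     # len(scs.mode) for an RGB space

pattern = argstack[-n:]                   # the colour operands
argstack = argstack[:-n]                  # everything popped in one slice
assert pattern == [0.2, 0.4, 0.6]
assert argstack == ['unrelated']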
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass TextState(PDFTextState):\n\n def __init__(self):\n super().__init__()\n self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.font = self.font\n obj.fontsize = self.fontsize\n obj.charspace = self.charspace\n obj.wordspace = self.wordspace\n obj.scaling = self.scaling\n obj.leading = self.leading\n obj.render = self.render\n obj.rise = self.rise\n obj.matrix = self.matrix\n obj.linematrix = self.linematrix\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n <function token>\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and 
any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n 
self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass TextState(PDFTextState):\n <function token>\n\n def copy(self):\n obj = self.__class__()\n obj.font = self.font\n obj.fontsize = self.fontsize\n obj.charspace = self.charspace\n obj.wordspace = self.wordspace\n obj.scaling = self.scaling\n obj.leading = self.leading\n obj.render = self.render\n obj.rise = self.rise\n obj.matrix = self.matrix\n obj.linematrix = self.linematrix\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n <function token>\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n 
textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, 
k))\n\n    def do_Tf(self, fontid, fontsize):\n        self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n        self.textstate.fontsize = fontsize\n\n    def do_Do(self, xobjid):\n        xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n        if not xobj:\n            return\n        self.debug and self.log('Processing xobj: %r' % xobj)\n        xobj = stream_value(xobj)\n        subtype = xobj.get('Subtype')\n        if subtype is LIT('Form') and 'BBox' in xobj:\n            interpreter = self.dup()\n            bbox = list_value(xobj['BBox'])\n            matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n            resources = dict_value(xobj.get('Resources')\n                ) or self.resources.copy()\n            self.device.begin_figure(xobjid, bbox, matrix)\n            interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n                matrix, self.ctm))\n            self.device.end_figure(xobjid)\n        elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n            self.device.render_image(xobjid, xobj, anchored=True)\n        else:\n            pass\n\n    def do_EI(self, obj):\n        if 'W' in obj and 'H' in obj:\n            self.device.render_image(str(id(obj)), obj, anchored=False,\n                textstate=self.textstate)\n\n    def do_gs(self, name):\n        if isinstance(name, PSLiteral):\n            name = name.name\n        gstate = self.resources['ExtGState'].get(name)\n        if gstate and not self.textstate.extState:\n            gstate = resolve1(gstate)\n            self.textstate.extState = gstate\n\n    def do_q(self):\n        self.gstack.append(self.get_current_state())\n\n    def do_Q(self):\n        self.gstack and self.set_current_state(self.gstack.pop())\n",
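The step strings above record a pdfminer-based Device/Interpreter pair. As a minimal driver sketch, assuming the Device and Interpreter classes recorded here are importable — PDFParser, PDFDocument, and PDFPage are the real pdfminer.six plumbing, and process_page is inherited from PDFPageInterpreter:

from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage

def extract_paragraphs(path):
    # Device and Interpreter are the classes recorded in the steps above.
    device = Device()
    interpreter = Interpreter(device)
    with open(path, 'rb') as fp:
        doc = PDFDocument(PDFParser(fp))
        for page in PDFPage.create_pages(doc):
            # process_page (inherited from pdfminer's PDFPageInterpreter)
            # calls device.begin_page, executes the content streams, then
            # device.end_page, which groups spans into lines and paragraphs.
            interpreter.process_page(page)
            yield device.text_layer
    interpreter.close()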
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n\n\nclass TextState(PDFTextState):\n <function token>\n <function token>\n <function token>\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n 
self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') 
and 'BBox' in xobj:\n            interpreter = self.dup()\n            bbox = list_value(xobj['BBox'])\n            matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n            resources = dict_value(xobj.get('Resources')\n                ) or self.resources.copy()\n            self.device.begin_figure(xobjid, bbox, matrix)\n            interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n                matrix, self.ctm))\n            self.device.end_figure(xobjid)\n        elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n            self.device.render_image(xobjid, xobj, anchored=True)\n        else:\n            pass\n\n    def do_EI(self, obj):\n        if 'W' in obj and 'H' in obj:\n            self.device.render_image(str(id(obj)), obj, anchored=False,\n                textstate=self.textstate)\n\n    def do_gs(self, name):\n        if isinstance(name, PSLiteral):\n            name = name.name\n        gstate = self.resources['ExtGState'].get(name)\n        if gstate and not self.textstate.extState:\n            gstate = resolve1(gstate)\n            self.textstate.extState = gstate\n\n    def do_q(self):\n        self.gstack.append(self.get_current_state())\n\n    def do_Q(self):\n        self.gstack and self.set_current_state(self.gstack.pop())\n",
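Device.get_current_layer, visible in the step above, unwinds the layer stack from the right: each 'end' marker closes the most recently begun figure, so the scan skips balanced begin/end pairs and returns the innermost still-open layer. The same bookkeeping as a standalone sketch (the stack values are made up):

def current_layer(stack):
    # Walk from the right; 'end' opens a deficit that the matching
    # figure name cancels, so balanced begin/end pairs are skipped.
    i, depth = -1, 0
    while True:
        name = stack[i]
        if name == 'end':
            depth += 1
        else:
            depth -= 1
            if depth < 0:
                return name  # first unmatched opener from the right
        i -= 1

assert current_layer(['Page']) == 'Page'
assert current_layer(['Page', 'Fig1', 'end']) == 'Page'
assert current_layer(['Page', 'Fig1', 'end', 'Fig2', 'Fig3', 'end']) == 'Fig2'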
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n\n def copy(self):\n obj = self.__class__()\n obj.linewidth = self.linewidth\n obj.linecap = self.linecap\n obj.linejoin = self.linejoin\n obj.miterlimit = self.miterlimit\n obj.dash = self.dash\n obj.intent = self.intent\n obj.flatness = self.flatness\n obj.stroke = self.stroke\n obj.fill = self.fill\n obj.extState = self.extState\n return obj\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def 
render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = 
list_value(xobj['BBox'])\n            matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n            resources = dict_value(xobj.get('Resources')\n                ) or self.resources.copy()\n            self.device.begin_figure(xobjid, bbox, matrix)\n            interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n                matrix, self.ctm))\n            self.device.end_figure(xobjid)\n        elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n            self.device.render_image(xobjid, xobj, anchored=True)\n        else:\n            pass\n\n    def do_EI(self, obj):\n        if 'W' in obj and 'H' in obj:\n            self.device.render_image(str(id(obj)), obj, anchored=False,\n                textstate=self.textstate)\n\n    def do_gs(self, name):\n        if isinstance(name, PSLiteral):\n            name = name.name\n        gstate = self.resources['ExtGState'].get(name)\n        if gstate and not self.textstate.extState:\n            gstate = resolve1(gstate)\n            self.textstate.extState = gstate\n\n    def do_q(self):\n        self.gstack.append(self.get_current_state())\n\n    def do_Q(self):\n        self.gstack and self.set_current_state(self.gstack.pop())\n",
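In do_Do, a Form XObject's /Matrix is composed with the current transform via mult_matrix before its content stream is rendered, so points in the form's coordinate space land correctly on the page. A small worked example using the real pdfminer.utils helpers with made-up numbers:

from pdfminer.utils import apply_matrix_pt, mult_matrix

ctm = (1, 0, 0, 1, 100, 200)      # page transform: translate by (100, 200)
form_matrix = (2, 0, 0, 2, 0, 0)  # the XObject scales its content by 2

combined = mult_matrix(form_matrix, ctm)    # same argument order as do_Do
print(combined)                             # (2, 0, 0, 2, 100, 200)
print(apply_matrix_pt(combined, (10, 10)))  # (120, 220)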
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass GraphicState(PDFGraphicState):\n\n def __init__(self):\n super().__init__()\n self.stroke = self.fill = None\n self.extState = {}\n <function token>\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, 
opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is 
LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n            self.device.render_image(xobjid, xobj, anchored=True)\n        else:\n            pass\n\n    def do_EI(self, obj):\n        if 'W' in obj and 'H' in obj:\n            self.device.render_image(str(id(obj)), obj, anchored=False,\n                textstate=self.textstate)\n\n    def do_gs(self, name):\n        if isinstance(name, PSLiteral):\n            name = name.name\n        gstate = self.resources['ExtGState'].get(name)\n        if gstate and not self.textstate.extState:\n            gstate = resolve1(gstate)\n            self.textstate.extState = gstate\n\n    def do_q(self):\n        self.gstack.append(self.get_current_state())\n\n    def do_Q(self):\n        self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass GraphicState(PDFGraphicState):\n <function token>\n <function token>\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, 
embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n 
self.device.render_image(xobjid, xobj, anchored=True)\n        else:\n            pass\n\n    def do_EI(self, obj):\n        if 'W' in obj and 'H' in obj:\n            self.device.render_image(str(id(obj)), obj, anchored=False,\n                textstate=self.textstate)\n\n    def do_gs(self, name):\n        if isinstance(name, PSLiteral):\n            name = name.name\n        gstate = self.resources['ExtGState'].get(name)\n        if gstate and not self.textstate.extState:\n            gstate = resolve1(gstate)\n            self.textstate.extState = gstate\n\n    def do_q(self):\n        self.gstack.append(self.get_current_state())\n\n    def do_Q(self):\n        self.gstack and self.set_current_state(self.gstack.pop())\n",
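do_SCN and do_scn size their operand pop by the active color space's component count, len(cs.mode) — assuming mode is the component string, e.g. 'CMYK' — so a CMYK space consumes four values from the argument stack. The slicing in isolation, with illustrative operand values:

argstack = [0.2, 0.4, 0.1, 0.0, 1.0]  # earlier operand, then CMYK components
n = 4                                  # len(cs.mode), e.g. 4 for 'CMYK'
pattern, argstack = argstack[-n:], argstack[:-n]
print(pattern)    # [0.4, 0.1, 0.0, 1.0] -> wrapped in a Paint value
print(argstack)   # [0.2] -> operands left for other operators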
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n\n def get_current_layer(self):\n i = -1\n depth = 0\n while True:\n layerName = self.layer_stack[i]\n if layerName == 'end':\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n break\n i -= 1\n return layerName, self.layers[layerName]\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, 
embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def 
do_EI(self, obj):\n        if 'W' in obj and 'H' in obj:\n            self.device.render_image(str(id(obj)), obj, anchored=False,\n                textstate=self.textstate)\n\n    def do_gs(self, name):\n        if isinstance(name, PSLiteral):\n            name = name.name\n        gstate = self.resources['ExtGState'].get(name)\n        if gstate and not self.textstate.extState:\n            gstate = resolve1(gstate)\n            self.textstate.extState = gstate\n\n    def do_q(self):\n        self.gstack.append(self.get_current_state())\n\n    def do_Q(self):\n        self.gstack and self.set_current_state(self.gstack.pop())\n",
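render_string's two append branches implement an overprint gate: /OP and /OPM come from the ExtGState, and a span painted with an all-zero fill under overprint is dropped rather than rendered as white-out ink. A sketch of just that predicate (keep_span is an illustrative name, not part of the recorded code):

def keep_span(ext_state, fill_components):
    # Mirror of the two append conditions in Device.render_string.
    if not ext_state.get('OP', False) or not ext_state.get('OPM', 0):
        return True               # overprint off: always paint
    return any(fill_components)   # overprint on: drop all-zero ink

print(keep_span({}, (0, 0, 0, 0)))                      # True
print(keep_span({'OP': True, 'OPM': 1}, (0, 0, 1, 0)))  # True
print(keep_span({'OP': True, 'OPM': 1}, (0, 0, 0, 0)))  # False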
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n <function token>\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n\n def begin_figure(self, name, bbox, matrix):\n x, y, w, h = bbox\n self.layers[name] = [x, y, x + w, y + h], matrix\n self.layer_stack.append(name)\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def 
get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = 
self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
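Device.is_visible first truncates the four corner points to integers and rejects spans whose corners collapse onto one another (zero-area boxes), then requires every corner to fall strictly inside the view box. The same test as a standalone sketch with made-up coordinates:

def is_visible(corners, viewbox):
    pts = {(int(x), int(y)) for x, y in corners}
    if len(pts) < len(corners):
        return False  # corners collapsed: zero-area span
    xmin, ymin, xmax, ymax = viewbox
    return all(xmin < x < xmax and ymin < y < ymax for x, y in pts)

print(is_visible([(10, 10), (50, 10), (50, 20), (10, 20)], (0, 0, 600, 800)))         # True
print(is_visible([(10, 10), (10.4, 10), (10.4, 10.2), (10, 10.2)], (0, 0, 600, 800)))  # False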
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n <function token>\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n <function token>\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n\n @render_type('text')\n def render_string(self, textstate, seq, *args):\n layerName = self.get_current_layer()[0]\n x, y = textstate.linematrix\n a, b, c, d, e, f = mult_matrix(textstate.matrix, self.ctm)\n matrix = a, b, c, d, e, self.ymax - f\n box = textSpanBox((x, y), seq, textstate, layerName=layerName,\n matrix=matrix)\n if not textstate.extState.get('OP', False\n ) or not textstate.extState.get('OPM', 0):\n self.text_layer.append(box)\n elif textstate.extState.get('OPM', 1) and any(textstate.fill.value):\n self.text_layer.append(box)\n textstate.linematrix = box.originbox[2]\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n 
self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n 
self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n\n def is_visible(self, span, bbox):\n boxset = set(map(lambda p: (int(p[0]), int(p[1])), span.bbox))\n if len(boxset) < len(span.bbox):\n return False\n xmin, ymin, xmax, ymax = bbox\n return all(xmin < x < xmax and ymin < y < ymax for x, y in boxset)\n <function token>\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n <function token>\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n <function token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources 
= resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n <function token>\n <function token>\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n <function token>\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n\n @render_type('image')\n def render_image(self, name, stream, anchored=False, textstate=None):\n bbox, matrix = self.get_current_layer()[1]\n self.images.setdefault(stream.objid, (name, stream, bbox, matrix))\n <function token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n 
self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n <function token>\n <function token>\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n <function token>\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n\n @render_type('path')\n def paint_path(self, graphicstate, stroke, fill, evenodd, path):\n return path\n <function token>\n <function token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = 
colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n <function token>\n <function token>\n\n def end_page(self, page):\n self.text_layer = filter(lambda x: not self.check_visible or self.\n is_visible(x, self.viewBox), self.text_layer)\n lines = self.analyzer.group_lines(self.text_layer)\n paras = self.analyzer.group_paras(lines)\n self.text_layer = paras\n self.pageno += 1\n <function token>\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n <function token>\n <function token>\n <function token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n 
def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Device(PDFDevice):\n\n def __init__(self, filtered=None, laparams=None, check_visible=True):\n super().__init__(None)\n self.filtered = filtered or []\n self.check_visible = check_visible\n self.analyzer = TextAnalyzer(**laparams or {})\n self.pageno = 1\n self.reset()\n self.viewBox = [0, 0, 0, 0]\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n <function token>\n <function token>\n <function token>\n <function token>\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n <function token>\n <function token>\n <function token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n 
n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Device(PDFDevice):\n <function token>\n\n def reset(self):\n self.images = {}\n self.text_layer = []\n self.layers = {}\n self.layer_stack = []\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n <function token>\n <function token>\n <function token>\n <function token>\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n <function token>\n <function token>\n <function token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = 
Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Device(PDFDevice):\n <function token>\n <function token>\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n <function token>\n <function token>\n <function token>\n <function token>\n\n def end_figure(self, name):\n self.layer_stack.append('end')\n <function token>\n <function token>\n <function token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n 
self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Device(PDFDevice):\n <function token>\n <function token>\n\n def begin_page(self, page, ctm):\n self.reset()\n self.layers[LIT('Page')] = page.cropbox, ctm\n self.layer_stack = [LIT('Page')]\n self.viewBox = page.cropbox\n self.ymax = page.mediabox[3] - page.mediabox[1]\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, 
gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass Device(PDFDevice):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n 
self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n\n def render_resource(self, res_type, res_obj):\n get_function = getattr(self, 'get_' + res_type.lower(), None)\n return get_function and get_function(None, obj=res_obj)\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = 
colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n <function token>\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n\n @get_default('colorspace')\n def get_colorspace(self, objid, obj=None):\n for csid, spec in dict_value(obj).items():\n cs = colorspace.parse(spec)\n if cs:\n self.colorspaces[literal(csid)] = cs\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font 
= self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n <function token>\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n <function token>\n\n def get_procset(self, objid, obj=None):\n pass\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n 
return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n <function token>\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n <function token>\n <function token>\n\n @get_default('xobject')\n def get_xobject(self, objid, obj=None):\n for xobjid, xobjstrm in dict_value(obj).items():\n self.xobjects[literal(xobjid)] = xobjstrm\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and 
self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n <function token>\n\n @get_default('font')\n def get_font(self, objid, obj=None):\n for fontid, spec in dict_value(obj).items():\n spec = dict_value(spec)\n spec, fontType, embedFont, opentype = pdffonts.getType(spec)\n if fontType:\n font = fontType(spec, embedFont=embedFont and self.xobjects\n .get(embedFont.objid, embedFont), opentype=opentype)\n if embedFont:\n objid = literal(embedFont.objid)\n if not objid in self.xobjects:\n self.xobjects[objid] = font.embedFont\n self.fonts[literal(fontid)] = font\n <function token>\n <function token>\n <function token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n 
interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n\n def clear(self):\n for res in self.fonts:\n stream_to_close = getattr(res, 'embedFont', None)\n stream_to_close and stream_to_close.close()\n self.fonts.clear()\n self.colorspaces.clear()\n self.xobjects.clear()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, 
anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ResourceManager(PDFResourceManager):\n\n def __init__(self):\n self.fonts = {}\n self.colorspaces = colorspace.defaults.copy()\n self.xobjects = {}\n self.cache = {}\n self.stream_objects = []\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, 
name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ResourceManager(PDFResourceManager):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not 
self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n\n def do_EI(self, obj):\n if 'W' in obj and 'H' in obj:\n self.device.render_image(str(id(obj)), obj, anchored=False,\n state=self.textstate)\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n 
self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n\n def do_Do(self, xobjid):\n xobj = self.rsrcmgr.get_xobject(literal(xobjid))\n if not xobj:\n return\n self.debug and self.log('Processing xobj: %r' % xobj)\n xobj = stream_value(xobj)\n subtype = xobj.get('Subtype')\n if subtype is LIT('Form') and 'BBox' in xobj:\n interpreter = self.dup()\n bbox = list_value(xobj['BBox'])\n matrix = list_value(xobj.get('Matrix', (1, 0, 0, 1, 0, 0)))\n resources = dict_value(xobj.get('Resources')\n ) or self.resources.copy()\n self.device.begin_figure(xobjid, bbox, matrix)\n interpreter.render_contents(resources, [xobj], ctm=mult_matrix(\n matrix, self.ctm))\n self.device.end_figure(xobjid)\n elif subtype is LIT('Image') and 'Width' in xobj and 'Height' in xobj:\n self.device.render_image(xobjid, xobj, anchored=True)\n else:\n pass\n <function token>\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n <function token>\n <function token>\n\n def do_gs(self, name):\n if isinstance(name, PSLiteral):\n name = name.name\n gstate = self.resources['ExtGState'].get(name)\n if gstate and not self.textstate.extState:\n gstate = resolve1(gstate)\n self.textstate.extState = gstate\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n\n def dup(self):\n return self.__class__(self.device)\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n <function token>\n <function token>\n <function token>\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n\n def log(self, message):\n pass\n <function token>\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n <function token>\n <function token>\n <function token>\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n <function token>\n <function token>\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n\n def do_K(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.stroke = Paint(cs, (c, m, y, k))\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n <function token>\n <function token>\n <function token>\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n <function token>\n <function token>\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_scn(self):\n n = len(self.ncs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.fill = self.textstate.fill = Paint(self.ncs, pattern)\n self.argstack = self.argstack[:-n]\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n <function token>\n <function token>\n <function token>\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n <function token>\n <function token>\n\n def close(self):\n self.rsrcmgr.clear()\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n <function token>\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n <function token>\n <function token>\n <function token>\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n <function token>\n <function token>\n <function token>\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n <function token>\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n\n def do_Tf(self, fontid, fontsize):\n self.textstate.font = self.rsrcmgr.get_font(literal(fontid))\n self.textstate.fontsize = fontsize\n <function token>\n <function token>\n <function token>\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n <function token>\n <function token>\n <function token>\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n <function token>\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n\n def do_k(self, c, m, y, k):\n cs = colorspace.CMYKColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (c, m, y, k))\n <function token>\n <function token>\n <function token>\n <function token>\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n <function token>\n <function token>\n <function token>\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n <function token>\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n\n def do_Q(self):\n self.gstack and self.set_current_state(self.gstack.pop())\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n <function token>\n <function token>\n <function token>\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_SCN(self):\n n = len(self.scs.mode)\n pattern = self.argstack[-n:]\n self.graphicstate.stroke = Paint(self.scs, pattern)\n self.argstack = self.argstack[:-n]\n <function token>\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n <function token>\n <function token>\n <function token>\n\n def init_resources(self, resources):\n self.resources = resources\n if resources:\n for k, v in dict_value(resources).items():\n self.debug and self.log('Resource: %r: %r' % (k, v))\n self.rsrcmgr.render_resource(k, v)\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n <function token>\n <function token>\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n <function token>\n <function token>\n <function token>\n <function token>\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n <function token>\n <function token>\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def do_q(self):\n self.gstack.append(self.get_current_state())\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n\n def __init__(self, device):\n self.rsrcmgr = ResourceManager()\n self.device = device\n <function token>\n <function token>\n <function token>\n <function token>\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n <function token>\n <function token>\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n\n def do_CS(self, name):\n self.scs = self.rsrcmgr.get_colorspace(literal(name))\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n <function token>\n <function token>\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n <function token>\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n <function token>\n <function token>\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n\n def do_RG(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.stroke = Paint(cs, (r, g, b))\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n <function token>\n\n def do_cs(self, name):\n self.ncs = self.rsrcmgr.get_colorspace(literal(name))\n <function token>\n <function token>\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n <function token>\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def do_G(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.stroke = Paint(cs, gray)\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n <function token>\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n <function token>\n\n def do_rg(self, r, g, b):\n cs = colorspace.RGBColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, (r, g, b))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def init_state(self, ctm):\n self.gstack = []\n self.ctm = ctm\n self.device.set_ctm(self.ctm)\n self.textstate = TextState()\n self.graphicstate = GraphicState()\n self.curpath = []\n self.argstack = []\n self.scs = self.ncs = colorspace.CMYKColorSpace()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def do_g(self, gray):\n cs = colorspace.GrayColorSpace()\n self.graphicstate.fill = self.textstate.fill = Paint(cs, gray)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass Interpreter(PDFPageInterpreter):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
99,534 |
270cbd72ab3595ceb7ebbd16d7d2e3c4db1abad2
|
import ROOT
from array import array
from plothelper import *
ROOT.gROOT.SetBatch(ROOT.kTRUE)
setStyle()
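# Overlay normalized signal (mMed/mDark/temp/decay grid) and HT-sliced QCD
# distributions, and derive simple ROC curves from the track-multiplicity histogram.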
def adjust(hist):
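    # Per-histogram cosmetic tweaks keyed off the histogram name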
name = hist.GetName()
#if "ntracks" in name: hist.Rebin()
if "ntracks" in name: hist.GetXaxis().SetRangeUser(0,600)
#if "npfs" in name: hist.Rebin()
if "npfs" in name:
hist.GetXaxis().SetRangeUser(0,600)
if "nneutrals" in name:
hist.GetXaxis().SetRangeUser(0,600)
hist.GetXaxis().SetTitle("n neutrals")
return
def clean1D(hist):
    # Common cosmetics; normalize to unit area (including under/overflow)
adjust(hist)
hist.SetLineWidth(2)
hist.GetYaxis().SetNdivisions(505)
hist.GetXaxis().SetNdivisions(505)
hist.SetDirectory(0)
    norm = hist.Integral(0,-1)
    if norm > 0: hist.Scale(1.0/norm)
return hist
def get1D(mMed,mDark,temp,decay,histname):
# Get hist
filename = "outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root".format(mMed,mDark,temp,decay)
f = ROOT.TFile.Open(filename)
hist = f.Get(histname)
clean1D(hist)
return hist
def getQCD(histname):
# Get hist
    # HT-sliced QCD samples; finer slicing to be done later
    filename1 = "outputs/hist_QCD_HT1000to1500_TuneCP5_13TeV-madgraphMLM-pythia8.root"
    filename2 = "outputs/hist_QCD_HT1500to2000_TuneCP5_13TeV-madgraphMLM-pythia8.root"
    filename3 = "outputs/hist_QCD_HT2000toInf_TuneCP5_13TeV-madgraphMLM-pythia8.root"
f1 = ROOT.TFile.Open(filename1)
f2 = ROOT.TFile.Open(filename2)
f3 = ROOT.TFile.Open(filename3)
hist1 = f1.Get(histname)
hist2 = f2.Get(histname)
hist3 = f3.Get(histname)
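    # Weight each HT slice by its cross section before summing
    # (values below assumed to be the 13 TeV cross sections in pb)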
hist1.Scale(1207)
hist2.Scale(119.9)
hist3.Scale(25.24)
    hist1.Add(hist2)
    hist1.Add(hist3)  # sum all three HT slices into hist1
clean1D(hist1)
return hist1
def decay_label(decay):
if "darkPhoHad" in decay: return "m_{A'}=0.7 GeV"
if "darkPho" in decay: return "m_{A'}=0.5 GeV"
if "generic" in decay: return "m_{A'}=m_{#phi}/2, A'#rightarrowu#bar{u}"
def makeROC(hists,labels,filename):
c = ROOT.TCanvas(filename,"",800,800)
dy = 0.05*len(hists)
leg = ROOT.TLegend(0.18,0.86-dy,0.86,0.86)
leg.SetTextSize(0.04)
leg.SetBorderSize(0)
for i,hist in enumerate(hists):
        if "QCD" in labels[i]: hbkg = hist
mgraph = ROOT.TMultiGraph()
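    # Build one ROC graph per signal: integrating from bin b to overflow gives the
    # efficiency of a cut at that bin's lower edge for signal (x) and QCD (y).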
for i,hist in enumerate(hists):
if "QCD" in labels[i]: continue
eff_sig = []
eff_bkg = []
err = []
for b in range(1,hist.GetNbinsX()+1):
eff_sig.append( hist.Integral(b,-1) )
eff_bkg.append( hbkg.Integral(b,-1) )
err.append(0.000000001)
graph = ROOT.TGraphErrors(len(err),array("d",eff_sig),array("d",eff_bkg),array("d",err),array("d",err))
graph.SetLineColor(colors[i])
mgraph.Add(graph)
leg.AddEntry(graph,labels[i],"l")
mgraph.SetTitle(";sig eff;bkg eff")
mgraph.Draw("AELP")
mgraph.GetYaxis().SetRangeUser(0.00000001,1)
leg.Draw()
c.SetLogy(1)
c.SetLogx(0)
c.Print("plots/{}.png".format(filename))
c.SetLogy(0)
c.SetLogx(0)
def compare1D(hists,labels,filename):
c = ROOT.TCanvas(filename,"",800,800)
dy = 0.05*len(hists)
leg = ROOT.TLegend(0.18,0.86-dy,0.86,0.86)
leg.SetTextSize(0.04)
leg.SetBorderSize(0)
ymax = 0
for i,hist in enumerate(hists):
hist.SetLineColor(colors[i])
if "QCD" in labels[i]: hist.SetLineColor(ROOT.kBlack)
if i==0: hist.Draw("hist")
else : hist.Draw("hist same")
if hist.GetMaximum() > ymax: ymax=hist.GetMaximum()
leg.AddEntry(hist,labels[i],"l")
leg.Draw()
c.SetLogy(1)
hists[0].GetYaxis().SetRangeUser(0.001,ymax*100)
c.Print("plots/{}_log.png".format(filename))
hists[0].GetYaxis().SetRangeUser(0,ymax*1.8)
c.SetLogy(0)
c.Print("plots/{}_lin.png".format(filename))
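# Overlay one distribution for several mediator masses plus the QCD background;
# for h_pf_ntracks, also produce a signal-vs-background ROC curve.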
def compareMass(temp,mDark,decay,histname):
mMeds = []
mMeds.append(125)
mMeds.append(400)
mMeds.append(750)
mMeds.append(1000)
hists = []
labels = []
for mMed in mMeds:
hists.append(get1D(mMed,mDark,temp,decay,histname))
label = "m_{S}=%i GeV, %s"%(mMed,decay_label(decay))
labels.append(label)
hists.append(getQCD(histname))
labels.append("QCD, H_{T}>1 TeV")
compare1D(hists,labels,"compare_mMed/temp{}_mDark{}_decay_{}_{}".format(temp,mDark,decay,histname))
if histname=="h_pf_ntracks":
makeROC(hists,labels,"roc_curve/temp{}_mDark{}_decay_{}_{}".format(temp,mDark,decay,histname))
def compareDecay(mMed,temp,mDark,histname):
decays = []
decays.append("darkPho")
decays.append("darkPhoHad")
decays.append("generic")
hists = []
labels = []
for decay in decays:
hists.append(get1D(mMed,mDark,temp,decay,histname))
label = "m_{S}=%i GeV,%s"%(mMed,decay_label(decay))
labels.append(label)
compare1D(hists,labels,"compare_decay/mMed{}_temp{}_mDark{}_{}".format(mMed,temp,mDark,histname))
dists = [
    "h_jet_eta",
    "h_pf_charged_ptzoom",
    "h_pf_neutral_e",
    "h_trigger",
    "h_jet_pt",
    "h_trig_ht",
    "h_pf_charged_qual",
    "h_pf_neutral_ptzoom",
    "h_trigger_jet",
    "h_pf_neutral_pt",
    "h_pf_npfs",
    "h_pf_neutral_eta",
    "h_pf_charged_phi",
    "h_pf_ntracks",
    "h_trigger_ht",
    "h_pf_neutral_phi",
    "h_trigger_met",
    "h_trig_njets",
    "h_trig_mht",
    "h_mht",
    "h_ht",
    "h_pf_charged_pt",
    "h_njets",
    "h_jet_phi",
    "h_pf_charged_eta",
    "h_pf_nneutrals",
]
for dist in dists:
#compareMass(2,2,"darkPho",dist)
#compareMass(2,2,"darkPhoHad",dist)
compareMass(2,2,"generic",dist)
#compareDecay(750,2,2,dist)
|
[
"import ROOT\nfrom array import array\nfrom plothelper import *\nROOT.gROOT.SetBatch(ROOT.kTRUE)\n\nsetStyle()\n\ndef adjust(hist):\n name = hist.GetName()\n #if \"ntracks\" in name: hist.Rebin()\n if \"ntracks\" in name: hist.GetXaxis().SetRangeUser(0,600)\n #if \"npfs\" in name: hist.Rebin()\n if \"npfs\" in name: \n hist.GetXaxis().SetRangeUser(0,600)\n if \"nneutrals\" in name: \n hist.GetXaxis().SetRangeUser(0,600)\n hist.GetXaxis().SetTitle(\"n neutrals\")\n return\n\ndef clean1D(hist):\n # Clean\n adjust(hist)\n hist.SetLineWidth(2)\n hist.GetYaxis().SetNdivisions(505)\n hist.GetXaxis().SetNdivisions(505)\n hist.SetDirectory(0)\n hist.Scale(1.0/hist.Integral(0,-1))\n return hist\n\ndef get1D(mMed,mDark,temp,decay,histname):\n\n # Get hist\n filename = \"outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root\".format(mMed,mDark,temp,decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n\n return hist\n\ndef getQCD(histname):\n\n # Get hist\n filename1 = \"outputs/hist_QCD_HT1000to1500_TuneCP5_13TeV-madgraphMLM-pythia8.root\"# do slicing later\n filename2 = \"outputs/hist_QCD_HT1500to2000_TuneCP5_13TeV-madgraphMLM-pythia8.root\"# do slicing later\n filename3 = \"outputs/hist_QCD_HT2000toInf_TuneCP5_13TeV-madgraphMLM-pythia8.root\"# do slicing later\n f1 = ROOT.TFile.Open(filename1)\n f2 = ROOT.TFile.Open(filename2)\n f3 = ROOT.TFile.Open(filename3)\n hist1 = f1.Get(histname)\n hist2 = f2.Get(histname)\n hist3 = f3.Get(histname)\n hist1.Scale(1207) \n hist2.Scale(119.9) \n hist3.Scale(25.24) \n hist1.Add(hist2)\n hist2.Add(hist3)\n clean1D(hist1)\n\n return hist1\n\ndef decay_label(decay):\n if \"darkPhoHad\" in decay: return \"m_{A'}=0.7 GeV\"\n if \"darkPho\" in decay: return \"m_{A'}=0.5 GeV\"\n if \"generic\" in decay: return \"m_{A'}=m_{#phi}/2, A'#rightarrowu#bar{u}\"\n\ndef makeROC(hists,labels,filename):\n c = ROOT.TCanvas(filename,\"\",800,800)\n\n dy = 0.05*len(hists)\n leg = ROOT.TLegend(0.18,0.86-dy,0.86,0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n\n for i,hist in enumerate(hists):\n if \"QCD\" in labels[i] : hbkg = hist\n\n ymax = 0\n mgraph = ROOT.TMultiGraph()\n for i,hist in enumerate(hists): \n if \"QCD\" in labels[i]: continue\n eff_sig = []\n eff_bkg = []\n err = []\n for b in range(1,hist.GetNbinsX()+1):\n eff_sig.append( hist.Integral(b,-1) )\n eff_bkg.append( hbkg.Integral(b,-1) )\n err.append(0.000000001)\n \n graph = ROOT.TGraphErrors(len(err),array(\"d\",eff_sig),array(\"d\",eff_bkg),array(\"d\",err),array(\"d\",err))\n\n graph.SetLineColor(colors[i])\n mgraph.Add(graph)\n leg.AddEntry(graph,labels[i],\"l\")\n \n mgraph.SetTitle(\";sig eff;bkg eff\")\n mgraph.Draw(\"AELP\")\n mgraph.GetYaxis().SetRangeUser(0.00000001,1)\n leg.Draw()\n c.SetLogy(1)\n c.SetLogx(0)\n c.Print(\"plots/{}.png\".format(filename))\n c.SetLogy(0)\n c.SetLogx(0)\n \n\ndef compare1D(hists,labels,filename):\n c = ROOT.TCanvas(filename,\"\",800,800)\n\n dy = 0.05*len(hists)\n leg = ROOT.TLegend(0.18,0.86-dy,0.86,0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n\n ymax = 0\n for i,hist in enumerate(hists): \n hist.SetLineColor(colors[i])\n if \"QCD\" in labels[i]: hist.SetLineColor(ROOT.kBlack) \n if i==0: hist.Draw(\"hist\")\n else : hist.Draw(\"hist same\")\n\n if hist.GetMaximum() > ymax: ymax=hist.GetMaximum()\n\n leg.AddEntry(hist,labels[i],\"l\")\n\n \n\n leg.Draw()\n \n c.SetLogy(1)\n hists[0].GetYaxis().SetRangeUser(0.001,ymax*100)\n c.Print(\"plots/{}_log.png\".format(filename))\n hists[0].GetYaxis().SetRangeUser(0,ymax*1.8)\n c.SetLogy(0)\n 
c.Print(\"plots/{}_lin.png\".format(filename))\n\ndef compareMass(temp,mDark,decay,histname):\n mMeds = []\n mMeds.append(125)\n mMeds.append(400)\n mMeds.append(750)\n mMeds.append(1000)\n\n hists = []\n labels = []\n for mMed in mMeds:\n hists.append(get1D(mMed,mDark,temp,decay,histname))\n label = \"m_{S}=%i GeV, %s\"%(mMed,decay_label(decay))\n labels.append(label)\n\n hists.append(getQCD(histname))\n labels.append(\"QCD, H_{T}>1 TeV\")\n \n compare1D(hists,labels,\"compare_mMed/temp{}_mDark{}_decay_{}_{}\".format(temp,mDark,decay,histname))\n if histname==\"h_pf_ntracks\": \n makeROC(hists,labels,\"roc_curve/temp{}_mDark{}_decay_{}_{}\".format(temp,mDark,decay,histname))\n\ndef compareDecay(mMed,temp,mDark,histname):\n decays = []\n decays.append(\"darkPho\")\n decays.append(\"darkPhoHad\")\n decays.append(\"generic\")\n\n hists = []\n labels = []\n for decay in decays:\n hists.append(get1D(mMed,mDark,temp,decay,histname))\n label = \"m_{S}=%i GeV,%s\"%(mMed,decay_label(decay))\n labels.append(label)\n \n compare1D(hists,labels,\"compare_decay/mMed{}_temp{}_mDark{}_{}\".format(mMed,temp,mDark,histname))\n\n\ndists=[]\ndists.append(\"h_jet_eta\")\t\ndists.append(\"h_pf_charged_ptzoom\")\t\ndists.append(\"h_pf_neutral_e\")\t\ndists.append(\"h_trigger\")\t\ndists.append(\"h_jet_pt\")\t\ndists.append(\"h_trig_ht\")\t\ndists.append(\"h_pf_charged_qual\")\t\ndists.append(\"h_pf_neutral_ptzoom\")\t\ndists.append(\"h_trigger_jet\")\t\ndists.append(\"h_pf_neutral_pt\")\t\ndists.append(\"h_pf_npfs\")\t\ndists.append(\"h_pf_neutral_eta\")\t\ndists.append(\"h_pf_charged_phi\")\t\ndists.append(\"h_pf_ntracks\")\t\ndists.append(\"h_trigger_ht\")\t\ndists.append(\"h_pf_neutral_phi\")\t\ndists.append(\"h_trigger_met\")\t\ndists.append(\"h_trig_njets\")\t\ndists.append(\"h_trig_mht\")\t\ndists.append(\"h_mht\")\t\ndists.append(\"h_ht\")\t\ndists.append(\"h_pf_charged_pt\")\t\ndists.append(\"h_njets\")\t\ndists.append(\"h_jet_phi\")\t\ndists.append(\"h_pf_charged_eta\")\t\ndists.append(\"h_pf_nneutrals\")\n\nfor dist in dists:\n #compareMass(2,2,\"darkPho\",dist)\n #compareMass(2,2,\"darkPhoHad\",dist)\n compareMass(2,2,\"generic\",dist)\n #compareDecay(750,2,2,dist)\n",
"import ROOT\nfrom array import array\nfrom plothelper import *\nROOT.gROOT.SetBatch(ROOT.kTRUE)\nsetStyle()\n\n\ndef adjust(hist):\n name = hist.GetName()\n if 'ntracks' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'npfs' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'nneutrals' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n hist.GetXaxis().SetTitle('n neutrals')\n return\n\n\ndef clean1D(hist):\n adjust(hist)\n hist.SetLineWidth(2)\n hist.GetYaxis().SetNdivisions(505)\n hist.GetXaxis().SetNdivisions(505)\n hist.SetDirectory(0)\n hist.Scale(1.0 / hist.Integral(0, -1))\n return hist\n\n\ndef get1D(mMed, mDark, temp, decay, histname):\n filename = 'outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root'.format(\n mMed, mDark, temp, decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n return hist\n\n\ndef getQCD(histname):\n filename1 = (\n 'outputs/hist_QCD_HT1000to1500_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n filename2 = (\n 'outputs/hist_QCD_HT1500to2000_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n filename3 = (\n 'outputs/hist_QCD_HT2000toInf_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n f1 = ROOT.TFile.Open(filename1)\n f2 = ROOT.TFile.Open(filename2)\n f3 = ROOT.TFile.Open(filename3)\n hist1 = f1.Get(histname)\n hist2 = f2.Get(histname)\n hist3 = f3.Get(histname)\n hist1.Scale(1207)\n hist2.Scale(119.9)\n hist3.Scale(25.24)\n hist1.Add(hist2)\n hist2.Add(hist3)\n clean1D(hist1)\n return hist1\n\n\ndef decay_label(decay):\n if 'darkPhoHad' in decay:\n return \"m_{A'}=0.7 GeV\"\n if 'darkPho' in decay:\n return \"m_{A'}=0.5 GeV\"\n if 'generic' in decay:\n return \"m_{A'}=m_{#phi}/2, A'#rightarrowu#bar{u}\"\n\n\ndef makeROC(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n hbkg = hist\n ymax = 0\n mgraph = ROOT.TMultiGraph()\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n continue\n eff_sig = []\n eff_bkg = []\n err = []\n for b in range(1, hist.GetNbinsX() + 1):\n eff_sig.append(hist.Integral(b, -1))\n eff_bkg.append(hbkg.Integral(b, -1))\n err.append(1e-09)\n graph = ROOT.TGraphErrors(len(err), array('d', eff_sig), array('d',\n eff_bkg), array('d', err), array('d', err))\n graph.SetLineColor(colors[i])\n mgraph.Add(graph)\n leg.AddEntry(graph, labels[i], 'l')\n mgraph.SetTitle(';sig eff;bkg eff')\n mgraph.Draw('AELP')\n mgraph.GetYaxis().SetRangeUser(1e-08, 1)\n leg.Draw()\n c.SetLogy(1)\n c.SetLogx(0)\n c.Print('plots/{}.png'.format(filename))\n c.SetLogy(0)\n c.SetLogx(0)\n\n\ndef compare1D(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n ymax = 0\n for i, hist in enumerate(hists):\n hist.SetLineColor(colors[i])\n if 'QCD' in labels[i]:\n hist.SetLineColor(ROOT.kBlack)\n if i == 0:\n hist.Draw('hist')\n else:\n hist.Draw('hist same')\n if hist.GetMaximum() > ymax:\n ymax = hist.GetMaximum()\n leg.AddEntry(hist, labels[i], 'l')\n leg.Draw()\n c.SetLogy(1)\n hists[0].GetYaxis().SetRangeUser(0.001, ymax * 100)\n c.Print('plots/{}_log.png'.format(filename))\n hists[0].GetYaxis().SetRangeUser(0, ymax * 1.8)\n c.SetLogy(0)\n c.Print('plots/{}_lin.png'.format(filename))\n\n\ndef compareMass(temp, mDark, decay, histname):\n mMeds = []\n mMeds.append(125)\n mMeds.append(400)\n mMeds.append(750)\n 
mMeds.append(1000)\n hists = []\n labels = []\n for mMed in mMeds:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV, %s' % (mMed, decay_label(decay))\n labels.append(label)\n hists.append(getQCD(histname))\n labels.append('QCD, H_{T}>1 TeV')\n compare1D(hists, labels, 'compare_mMed/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n if histname == 'h_pf_ntracks':\n makeROC(hists, labels, 'roc_curve/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n\n\ndef compareDecay(mMed, temp, mDark, histname):\n decays = []\n decays.append('darkPho')\n decays.append('darkPhoHad')\n decays.append('generic')\n hists = []\n labels = []\n for decay in decays:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV,%s' % (mMed, decay_label(decay))\n labels.append(label)\n compare1D(hists, labels, 'compare_decay/mMed{}_temp{}_mDark{}_{}'.\n format(mMed, temp, mDark, histname))\n\n\ndists = []\ndists.append('h_jet_eta')\ndists.append('h_pf_charged_ptzoom')\ndists.append('h_pf_neutral_e')\ndists.append('h_trigger')\ndists.append('h_jet_pt')\ndists.append('h_trig_ht')\ndists.append('h_pf_charged_qual')\ndists.append('h_pf_neutral_ptzoom')\ndists.append('h_trigger_jet')\ndists.append('h_pf_neutral_pt')\ndists.append('h_pf_npfs')\ndists.append('h_pf_neutral_eta')\ndists.append('h_pf_charged_phi')\ndists.append('h_pf_ntracks')\ndists.append('h_trigger_ht')\ndists.append('h_pf_neutral_phi')\ndists.append('h_trigger_met')\ndists.append('h_trig_njets')\ndists.append('h_trig_mht')\ndists.append('h_mht')\ndists.append('h_ht')\ndists.append('h_pf_charged_pt')\ndists.append('h_njets')\ndists.append('h_jet_phi')\ndists.append('h_pf_charged_eta')\ndists.append('h_pf_nneutrals')\nfor dist in dists:\n compareMass(2, 2, 'generic', dist)\n",
"<import token>\nROOT.gROOT.SetBatch(ROOT.kTRUE)\nsetStyle()\n\n\ndef adjust(hist):\n name = hist.GetName()\n if 'ntracks' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'npfs' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'nneutrals' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n hist.GetXaxis().SetTitle('n neutrals')\n return\n\n\ndef clean1D(hist):\n adjust(hist)\n hist.SetLineWidth(2)\n hist.GetYaxis().SetNdivisions(505)\n hist.GetXaxis().SetNdivisions(505)\n hist.SetDirectory(0)\n hist.Scale(1.0 / hist.Integral(0, -1))\n return hist\n\n\ndef get1D(mMed, mDark, temp, decay, histname):\n filename = 'outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root'.format(\n mMed, mDark, temp, decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n return hist\n\n\ndef getQCD(histname):\n filename1 = (\n 'outputs/hist_QCD_HT1000to1500_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n filename2 = (\n 'outputs/hist_QCD_HT1500to2000_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n filename3 = (\n 'outputs/hist_QCD_HT2000toInf_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n f1 = ROOT.TFile.Open(filename1)\n f2 = ROOT.TFile.Open(filename2)\n f3 = ROOT.TFile.Open(filename3)\n hist1 = f1.Get(histname)\n hist2 = f2.Get(histname)\n hist3 = f3.Get(histname)\n hist1.Scale(1207)\n hist2.Scale(119.9)\n hist3.Scale(25.24)\n hist1.Add(hist2)\n hist2.Add(hist3)\n clean1D(hist1)\n return hist1\n\n\ndef decay_label(decay):\n if 'darkPhoHad' in decay:\n return \"m_{A'}=0.7 GeV\"\n if 'darkPho' in decay:\n return \"m_{A'}=0.5 GeV\"\n if 'generic' in decay:\n return \"m_{A'}=m_{#phi}/2, A'#rightarrowu#bar{u}\"\n\n\ndef makeROC(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n hbkg = hist\n ymax = 0\n mgraph = ROOT.TMultiGraph()\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n continue\n eff_sig = []\n eff_bkg = []\n err = []\n for b in range(1, hist.GetNbinsX() + 1):\n eff_sig.append(hist.Integral(b, -1))\n eff_bkg.append(hbkg.Integral(b, -1))\n err.append(1e-09)\n graph = ROOT.TGraphErrors(len(err), array('d', eff_sig), array('d',\n eff_bkg), array('d', err), array('d', err))\n graph.SetLineColor(colors[i])\n mgraph.Add(graph)\n leg.AddEntry(graph, labels[i], 'l')\n mgraph.SetTitle(';sig eff;bkg eff')\n mgraph.Draw('AELP')\n mgraph.GetYaxis().SetRangeUser(1e-08, 1)\n leg.Draw()\n c.SetLogy(1)\n c.SetLogx(0)\n c.Print('plots/{}.png'.format(filename))\n c.SetLogy(0)\n c.SetLogx(0)\n\n\ndef compare1D(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n ymax = 0\n for i, hist in enumerate(hists):\n hist.SetLineColor(colors[i])\n if 'QCD' in labels[i]:\n hist.SetLineColor(ROOT.kBlack)\n if i == 0:\n hist.Draw('hist')\n else:\n hist.Draw('hist same')\n if hist.GetMaximum() > ymax:\n ymax = hist.GetMaximum()\n leg.AddEntry(hist, labels[i], 'l')\n leg.Draw()\n c.SetLogy(1)\n hists[0].GetYaxis().SetRangeUser(0.001, ymax * 100)\n c.Print('plots/{}_log.png'.format(filename))\n hists[0].GetYaxis().SetRangeUser(0, ymax * 1.8)\n c.SetLogy(0)\n c.Print('plots/{}_lin.png'.format(filename))\n\n\ndef compareMass(temp, mDark, decay, histname):\n mMeds = []\n mMeds.append(125)\n mMeds.append(400)\n mMeds.append(750)\n mMeds.append(1000)\n hists = []\n labels = []\n 
for mMed in mMeds:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV, %s' % (mMed, decay_label(decay))\n labels.append(label)\n hists.append(getQCD(histname))\n labels.append('QCD, H_{T}>1 TeV')\n compare1D(hists, labels, 'compare_mMed/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n if histname == 'h_pf_ntracks':\n makeROC(hists, labels, 'roc_curve/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n\n\ndef compareDecay(mMed, temp, mDark, histname):\n decays = []\n decays.append('darkPho')\n decays.append('darkPhoHad')\n decays.append('generic')\n hists = []\n labels = []\n for decay in decays:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV,%s' % (mMed, decay_label(decay))\n labels.append(label)\n compare1D(hists, labels, 'compare_decay/mMed{}_temp{}_mDark{}_{}'.\n format(mMed, temp, mDark, histname))\n\n\ndists = []\ndists.append('h_jet_eta')\ndists.append('h_pf_charged_ptzoom')\ndists.append('h_pf_neutral_e')\ndists.append('h_trigger')\ndists.append('h_jet_pt')\ndists.append('h_trig_ht')\ndists.append('h_pf_charged_qual')\ndists.append('h_pf_neutral_ptzoom')\ndists.append('h_trigger_jet')\ndists.append('h_pf_neutral_pt')\ndists.append('h_pf_npfs')\ndists.append('h_pf_neutral_eta')\ndists.append('h_pf_charged_phi')\ndists.append('h_pf_ntracks')\ndists.append('h_trigger_ht')\ndists.append('h_pf_neutral_phi')\ndists.append('h_trigger_met')\ndists.append('h_trig_njets')\ndists.append('h_trig_mht')\ndists.append('h_mht')\ndists.append('h_ht')\ndists.append('h_pf_charged_pt')\ndists.append('h_njets')\ndists.append('h_jet_phi')\ndists.append('h_pf_charged_eta')\ndists.append('h_pf_nneutrals')\nfor dist in dists:\n compareMass(2, 2, 'generic', dist)\n",
"<import token>\nROOT.gROOT.SetBatch(ROOT.kTRUE)\nsetStyle()\n\n\ndef adjust(hist):\n name = hist.GetName()\n if 'ntracks' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'npfs' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'nneutrals' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n hist.GetXaxis().SetTitle('n neutrals')\n return\n\n\ndef clean1D(hist):\n adjust(hist)\n hist.SetLineWidth(2)\n hist.GetYaxis().SetNdivisions(505)\n hist.GetXaxis().SetNdivisions(505)\n hist.SetDirectory(0)\n hist.Scale(1.0 / hist.Integral(0, -1))\n return hist\n\n\ndef get1D(mMed, mDark, temp, decay, histname):\n filename = 'outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root'.format(\n mMed, mDark, temp, decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n return hist\n\n\ndef getQCD(histname):\n filename1 = (\n 'outputs/hist_QCD_HT1000to1500_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n filename2 = (\n 'outputs/hist_QCD_HT1500to2000_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n filename3 = (\n 'outputs/hist_QCD_HT2000toInf_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n f1 = ROOT.TFile.Open(filename1)\n f2 = ROOT.TFile.Open(filename2)\n f3 = ROOT.TFile.Open(filename3)\n hist1 = f1.Get(histname)\n hist2 = f2.Get(histname)\n hist3 = f3.Get(histname)\n hist1.Scale(1207)\n hist2.Scale(119.9)\n hist3.Scale(25.24)\n hist1.Add(hist2)\n hist2.Add(hist3)\n clean1D(hist1)\n return hist1\n\n\ndef decay_label(decay):\n if 'darkPhoHad' in decay:\n return \"m_{A'}=0.7 GeV\"\n if 'darkPho' in decay:\n return \"m_{A'}=0.5 GeV\"\n if 'generic' in decay:\n return \"m_{A'}=m_{#phi}/2, A'#rightarrowu#bar{u}\"\n\n\ndef makeROC(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n hbkg = hist\n ymax = 0\n mgraph = ROOT.TMultiGraph()\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n continue\n eff_sig = []\n eff_bkg = []\n err = []\n for b in range(1, hist.GetNbinsX() + 1):\n eff_sig.append(hist.Integral(b, -1))\n eff_bkg.append(hbkg.Integral(b, -1))\n err.append(1e-09)\n graph = ROOT.TGraphErrors(len(err), array('d', eff_sig), array('d',\n eff_bkg), array('d', err), array('d', err))\n graph.SetLineColor(colors[i])\n mgraph.Add(graph)\n leg.AddEntry(graph, labels[i], 'l')\n mgraph.SetTitle(';sig eff;bkg eff')\n mgraph.Draw('AELP')\n mgraph.GetYaxis().SetRangeUser(1e-08, 1)\n leg.Draw()\n c.SetLogy(1)\n c.SetLogx(0)\n c.Print('plots/{}.png'.format(filename))\n c.SetLogy(0)\n c.SetLogx(0)\n\n\ndef compare1D(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n ymax = 0\n for i, hist in enumerate(hists):\n hist.SetLineColor(colors[i])\n if 'QCD' in labels[i]:\n hist.SetLineColor(ROOT.kBlack)\n if i == 0:\n hist.Draw('hist')\n else:\n hist.Draw('hist same')\n if hist.GetMaximum() > ymax:\n ymax = hist.GetMaximum()\n leg.AddEntry(hist, labels[i], 'l')\n leg.Draw()\n c.SetLogy(1)\n hists[0].GetYaxis().SetRangeUser(0.001, ymax * 100)\n c.Print('plots/{}_log.png'.format(filename))\n hists[0].GetYaxis().SetRangeUser(0, ymax * 1.8)\n c.SetLogy(0)\n c.Print('plots/{}_lin.png'.format(filename))\n\n\ndef compareMass(temp, mDark, decay, histname):\n mMeds = []\n mMeds.append(125)\n mMeds.append(400)\n mMeds.append(750)\n mMeds.append(1000)\n hists = []\n labels = []\n 
for mMed in mMeds:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV, %s' % (mMed, decay_label(decay))\n labels.append(label)\n hists.append(getQCD(histname))\n labels.append('QCD, H_{T}>1 TeV')\n compare1D(hists, labels, 'compare_mMed/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n if histname == 'h_pf_ntracks':\n makeROC(hists, labels, 'roc_curve/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n\n\ndef compareDecay(mMed, temp, mDark, histname):\n decays = []\n decays.append('darkPho')\n decays.append('darkPhoHad')\n decays.append('generic')\n hists = []\n labels = []\n for decay in decays:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV,%s' % (mMed, decay_label(decay))\n labels.append(label)\n compare1D(hists, labels, 'compare_decay/mMed{}_temp{}_mDark{}_{}'.\n format(mMed, temp, mDark, histname))\n\n\n<assignment token>\ndists.append('h_jet_eta')\ndists.append('h_pf_charged_ptzoom')\ndists.append('h_pf_neutral_e')\ndists.append('h_trigger')\ndists.append('h_jet_pt')\ndists.append('h_trig_ht')\ndists.append('h_pf_charged_qual')\ndists.append('h_pf_neutral_ptzoom')\ndists.append('h_trigger_jet')\ndists.append('h_pf_neutral_pt')\ndists.append('h_pf_npfs')\ndists.append('h_pf_neutral_eta')\ndists.append('h_pf_charged_phi')\ndists.append('h_pf_ntracks')\ndists.append('h_trigger_ht')\ndists.append('h_pf_neutral_phi')\ndists.append('h_trigger_met')\ndists.append('h_trig_njets')\ndists.append('h_trig_mht')\ndists.append('h_mht')\ndists.append('h_ht')\ndists.append('h_pf_charged_pt')\ndists.append('h_njets')\ndists.append('h_jet_phi')\ndists.append('h_pf_charged_eta')\ndists.append('h_pf_nneutrals')\nfor dist in dists:\n compareMass(2, 2, 'generic', dist)\n",
"<import token>\n<code token>\n\n\ndef adjust(hist):\n name = hist.GetName()\n if 'ntracks' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'npfs' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'nneutrals' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n hist.GetXaxis().SetTitle('n neutrals')\n return\n\n\ndef clean1D(hist):\n adjust(hist)\n hist.SetLineWidth(2)\n hist.GetYaxis().SetNdivisions(505)\n hist.GetXaxis().SetNdivisions(505)\n hist.SetDirectory(0)\n hist.Scale(1.0 / hist.Integral(0, -1))\n return hist\n\n\ndef get1D(mMed, mDark, temp, decay, histname):\n filename = 'outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root'.format(\n mMed, mDark, temp, decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n return hist\n\n\ndef getQCD(histname):\n filename1 = (\n 'outputs/hist_QCD_HT1000to1500_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n filename2 = (\n 'outputs/hist_QCD_HT1500to2000_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n filename3 = (\n 'outputs/hist_QCD_HT2000toInf_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n f1 = ROOT.TFile.Open(filename1)\n f2 = ROOT.TFile.Open(filename2)\n f3 = ROOT.TFile.Open(filename3)\n hist1 = f1.Get(histname)\n hist2 = f2.Get(histname)\n hist3 = f3.Get(histname)\n hist1.Scale(1207)\n hist2.Scale(119.9)\n hist3.Scale(25.24)\n hist1.Add(hist2)\n hist2.Add(hist3)\n clean1D(hist1)\n return hist1\n\n\ndef decay_label(decay):\n if 'darkPhoHad' in decay:\n return \"m_{A'}=0.7 GeV\"\n if 'darkPho' in decay:\n return \"m_{A'}=0.5 GeV\"\n if 'generic' in decay:\n return \"m_{A'}=m_{#phi}/2, A'#rightarrowu#bar{u}\"\n\n\ndef makeROC(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n hbkg = hist\n ymax = 0\n mgraph = ROOT.TMultiGraph()\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n continue\n eff_sig = []\n eff_bkg = []\n err = []\n for b in range(1, hist.GetNbinsX() + 1):\n eff_sig.append(hist.Integral(b, -1))\n eff_bkg.append(hbkg.Integral(b, -1))\n err.append(1e-09)\n graph = ROOT.TGraphErrors(len(err), array('d', eff_sig), array('d',\n eff_bkg), array('d', err), array('d', err))\n graph.SetLineColor(colors[i])\n mgraph.Add(graph)\n leg.AddEntry(graph, labels[i], 'l')\n mgraph.SetTitle(';sig eff;bkg eff')\n mgraph.Draw('AELP')\n mgraph.GetYaxis().SetRangeUser(1e-08, 1)\n leg.Draw()\n c.SetLogy(1)\n c.SetLogx(0)\n c.Print('plots/{}.png'.format(filename))\n c.SetLogy(0)\n c.SetLogx(0)\n\n\ndef compare1D(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n ymax = 0\n for i, hist in enumerate(hists):\n hist.SetLineColor(colors[i])\n if 'QCD' in labels[i]:\n hist.SetLineColor(ROOT.kBlack)\n if i == 0:\n hist.Draw('hist')\n else:\n hist.Draw('hist same')\n if hist.GetMaximum() > ymax:\n ymax = hist.GetMaximum()\n leg.AddEntry(hist, labels[i], 'l')\n leg.Draw()\n c.SetLogy(1)\n hists[0].GetYaxis().SetRangeUser(0.001, ymax * 100)\n c.Print('plots/{}_log.png'.format(filename))\n hists[0].GetYaxis().SetRangeUser(0, ymax * 1.8)\n c.SetLogy(0)\n c.Print('plots/{}_lin.png'.format(filename))\n\n\ndef compareMass(temp, mDark, decay, histname):\n mMeds = []\n mMeds.append(125)\n mMeds.append(400)\n mMeds.append(750)\n mMeds.append(1000)\n hists = []\n labels = []\n for mMed in mMeds:\n 
hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV, %s' % (mMed, decay_label(decay))\n labels.append(label)\n hists.append(getQCD(histname))\n labels.append('QCD, H_{T}>1 TeV')\n compare1D(hists, labels, 'compare_mMed/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n if histname == 'h_pf_ntracks':\n makeROC(hists, labels, 'roc_curve/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n\n\ndef compareDecay(mMed, temp, mDark, histname):\n decays = []\n decays.append('darkPho')\n decays.append('darkPhoHad')\n decays.append('generic')\n hists = []\n labels = []\n for decay in decays:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV,%s' % (mMed, decay_label(decay))\n labels.append(label)\n compare1D(hists, labels, 'compare_decay/mMed{}_temp{}_mDark{}_{}'.\n format(mMed, temp, mDark, histname))\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n\n\ndef adjust(hist):\n name = hist.GetName()\n if 'ntracks' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'npfs' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'nneutrals' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n hist.GetXaxis().SetTitle('n neutrals')\n return\n\n\ndef clean1D(hist):\n adjust(hist)\n hist.SetLineWidth(2)\n hist.GetYaxis().SetNdivisions(505)\n hist.GetXaxis().SetNdivisions(505)\n hist.SetDirectory(0)\n hist.Scale(1.0 / hist.Integral(0, -1))\n return hist\n\n\ndef get1D(mMed, mDark, temp, decay, histname):\n filename = 'outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root'.format(\n mMed, mDark, temp, decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n return hist\n\n\ndef getQCD(histname):\n filename1 = (\n 'outputs/hist_QCD_HT1000to1500_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n filename2 = (\n 'outputs/hist_QCD_HT1500to2000_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n filename3 = (\n 'outputs/hist_QCD_HT2000toInf_TuneCP5_13TeV-madgraphMLM-pythia8.root')\n f1 = ROOT.TFile.Open(filename1)\n f2 = ROOT.TFile.Open(filename2)\n f3 = ROOT.TFile.Open(filename3)\n hist1 = f1.Get(histname)\n hist2 = f2.Get(histname)\n hist3 = f3.Get(histname)\n hist1.Scale(1207)\n hist2.Scale(119.9)\n hist3.Scale(25.24)\n hist1.Add(hist2)\n hist2.Add(hist3)\n clean1D(hist1)\n return hist1\n\n\ndef decay_label(decay):\n if 'darkPhoHad' in decay:\n return \"m_{A'}=0.7 GeV\"\n if 'darkPho' in decay:\n return \"m_{A'}=0.5 GeV\"\n if 'generic' in decay:\n return \"m_{A'}=m_{#phi}/2, A'#rightarrowu#bar{u}\"\n\n\ndef makeROC(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n hbkg = hist\n ymax = 0\n mgraph = ROOT.TMultiGraph()\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n continue\n eff_sig = []\n eff_bkg = []\n err = []\n for b in range(1, hist.GetNbinsX() + 1):\n eff_sig.append(hist.Integral(b, -1))\n eff_bkg.append(hbkg.Integral(b, -1))\n err.append(1e-09)\n graph = ROOT.TGraphErrors(len(err), array('d', eff_sig), array('d',\n eff_bkg), array('d', err), array('d', err))\n graph.SetLineColor(colors[i])\n mgraph.Add(graph)\n leg.AddEntry(graph, labels[i], 'l')\n mgraph.SetTitle(';sig eff;bkg eff')\n mgraph.Draw('AELP')\n mgraph.GetYaxis().SetRangeUser(1e-08, 1)\n leg.Draw()\n c.SetLogy(1)\n c.SetLogx(0)\n c.Print('plots/{}.png'.format(filename))\n c.SetLogy(0)\n c.SetLogx(0)\n\n\n<function token>\n\n\ndef compareMass(temp, mDark, decay, histname):\n mMeds = []\n mMeds.append(125)\n mMeds.append(400)\n mMeds.append(750)\n mMeds.append(1000)\n hists = []\n labels = []\n for mMed in mMeds:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV, %s' % (mMed, decay_label(decay))\n labels.append(label)\n hists.append(getQCD(histname))\n labels.append('QCD, H_{T}>1 TeV')\n compare1D(hists, labels, 'compare_mMed/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n if histname == 'h_pf_ntracks':\n makeROC(hists, labels, 'roc_curve/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n\n\ndef compareDecay(mMed, temp, mDark, histname):\n decays = []\n decays.append('darkPho')\n decays.append('darkPhoHad')\n decays.append('generic')\n hists = []\n labels = []\n for decay in decays:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i 
GeV,%s' % (mMed, decay_label(decay))\n labels.append(label)\n compare1D(hists, labels, 'compare_decay/mMed{}_temp{}_mDark{}_{}'.\n format(mMed, temp, mDark, histname))\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n\n\ndef adjust(hist):\n name = hist.GetName()\n if 'ntracks' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'npfs' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'nneutrals' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n hist.GetXaxis().SetTitle('n neutrals')\n return\n\n\ndef clean1D(hist):\n adjust(hist)\n hist.SetLineWidth(2)\n hist.GetYaxis().SetNdivisions(505)\n hist.GetXaxis().SetNdivisions(505)\n hist.SetDirectory(0)\n hist.Scale(1.0 / hist.Integral(0, -1))\n return hist\n\n\ndef get1D(mMed, mDark, temp, decay, histname):\n filename = 'outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root'.format(\n mMed, mDark, temp, decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n return hist\n\n\n<function token>\n\n\ndef decay_label(decay):\n if 'darkPhoHad' in decay:\n return \"m_{A'}=0.7 GeV\"\n if 'darkPho' in decay:\n return \"m_{A'}=0.5 GeV\"\n if 'generic' in decay:\n return \"m_{A'}=m_{#phi}/2, A'#rightarrowu#bar{u}\"\n\n\ndef makeROC(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n hbkg = hist\n ymax = 0\n mgraph = ROOT.TMultiGraph()\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n continue\n eff_sig = []\n eff_bkg = []\n err = []\n for b in range(1, hist.GetNbinsX() + 1):\n eff_sig.append(hist.Integral(b, -1))\n eff_bkg.append(hbkg.Integral(b, -1))\n err.append(1e-09)\n graph = ROOT.TGraphErrors(len(err), array('d', eff_sig), array('d',\n eff_bkg), array('d', err), array('d', err))\n graph.SetLineColor(colors[i])\n mgraph.Add(graph)\n leg.AddEntry(graph, labels[i], 'l')\n mgraph.SetTitle(';sig eff;bkg eff')\n mgraph.Draw('AELP')\n mgraph.GetYaxis().SetRangeUser(1e-08, 1)\n leg.Draw()\n c.SetLogy(1)\n c.SetLogx(0)\n c.Print('plots/{}.png'.format(filename))\n c.SetLogy(0)\n c.SetLogx(0)\n\n\n<function token>\n\n\ndef compareMass(temp, mDark, decay, histname):\n mMeds = []\n mMeds.append(125)\n mMeds.append(400)\n mMeds.append(750)\n mMeds.append(1000)\n hists = []\n labels = []\n for mMed in mMeds:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV, %s' % (mMed, decay_label(decay))\n labels.append(label)\n hists.append(getQCD(histname))\n labels.append('QCD, H_{T}>1 TeV')\n compare1D(hists, labels, 'compare_mMed/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n if histname == 'h_pf_ntracks':\n makeROC(hists, labels, 'roc_curve/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n\n\ndef compareDecay(mMed, temp, mDark, histname):\n decays = []\n decays.append('darkPho')\n decays.append('darkPhoHad')\n decays.append('generic')\n hists = []\n labels = []\n for decay in decays:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV,%s' % (mMed, decay_label(decay))\n labels.append(label)\n compare1D(hists, labels, 'compare_decay/mMed{}_temp{}_mDark{}_{}'.\n format(mMed, temp, mDark, histname))\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n\n\ndef adjust(hist):\n name = hist.GetName()\n if 'ntracks' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'npfs' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'nneutrals' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n hist.GetXaxis().SetTitle('n neutrals')\n return\n\n\ndef clean1D(hist):\n adjust(hist)\n hist.SetLineWidth(2)\n hist.GetYaxis().SetNdivisions(505)\n hist.GetXaxis().SetNdivisions(505)\n hist.SetDirectory(0)\n hist.Scale(1.0 / hist.Integral(0, -1))\n return hist\n\n\ndef get1D(mMed, mDark, temp, decay, histname):\n filename = 'outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root'.format(\n mMed, mDark, temp, decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n return hist\n\n\n<function token>\n\n\ndef decay_label(decay):\n if 'darkPhoHad' in decay:\n return \"m_{A'}=0.7 GeV\"\n if 'darkPho' in decay:\n return \"m_{A'}=0.5 GeV\"\n if 'generic' in decay:\n return \"m_{A'}=m_{#phi}/2, A'#rightarrowu#bar{u}\"\n\n\ndef makeROC(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n hbkg = hist\n ymax = 0\n mgraph = ROOT.TMultiGraph()\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n continue\n eff_sig = []\n eff_bkg = []\n err = []\n for b in range(1, hist.GetNbinsX() + 1):\n eff_sig.append(hist.Integral(b, -1))\n eff_bkg.append(hbkg.Integral(b, -1))\n err.append(1e-09)\n graph = ROOT.TGraphErrors(len(err), array('d', eff_sig), array('d',\n eff_bkg), array('d', err), array('d', err))\n graph.SetLineColor(colors[i])\n mgraph.Add(graph)\n leg.AddEntry(graph, labels[i], 'l')\n mgraph.SetTitle(';sig eff;bkg eff')\n mgraph.Draw('AELP')\n mgraph.GetYaxis().SetRangeUser(1e-08, 1)\n leg.Draw()\n c.SetLogy(1)\n c.SetLogx(0)\n c.Print('plots/{}.png'.format(filename))\n c.SetLogy(0)\n c.SetLogx(0)\n\n\n<function token>\n\n\ndef compareMass(temp, mDark, decay, histname):\n mMeds = []\n mMeds.append(125)\n mMeds.append(400)\n mMeds.append(750)\n mMeds.append(1000)\n hists = []\n labels = []\n for mMed in mMeds:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV, %s' % (mMed, decay_label(decay))\n labels.append(label)\n hists.append(getQCD(histname))\n labels.append('QCD, H_{T}>1 TeV')\n compare1D(hists, labels, 'compare_mMed/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n if histname == 'h_pf_ntracks':\n makeROC(hists, labels, 'roc_curve/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n\n\ndef adjust(hist):\n name = hist.GetName()\n if 'ntracks' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'npfs' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'nneutrals' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n hist.GetXaxis().SetTitle('n neutrals')\n return\n\n\ndef clean1D(hist):\n adjust(hist)\n hist.SetLineWidth(2)\n hist.GetYaxis().SetNdivisions(505)\n hist.GetXaxis().SetNdivisions(505)\n hist.SetDirectory(0)\n hist.Scale(1.0 / hist.Integral(0, -1))\n return hist\n\n\ndef get1D(mMed, mDark, temp, decay, histname):\n filename = 'outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root'.format(\n mMed, mDark, temp, decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n return hist\n\n\n<function token>\n<function token>\n\n\ndef makeROC(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n hbkg = hist\n ymax = 0\n mgraph = ROOT.TMultiGraph()\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n continue\n eff_sig = []\n eff_bkg = []\n err = []\n for b in range(1, hist.GetNbinsX() + 1):\n eff_sig.append(hist.Integral(b, -1))\n eff_bkg.append(hbkg.Integral(b, -1))\n err.append(1e-09)\n graph = ROOT.TGraphErrors(len(err), array('d', eff_sig), array('d',\n eff_bkg), array('d', err), array('d', err))\n graph.SetLineColor(colors[i])\n mgraph.Add(graph)\n leg.AddEntry(graph, labels[i], 'l')\n mgraph.SetTitle(';sig eff;bkg eff')\n mgraph.Draw('AELP')\n mgraph.GetYaxis().SetRangeUser(1e-08, 1)\n leg.Draw()\n c.SetLogy(1)\n c.SetLogx(0)\n c.Print('plots/{}.png'.format(filename))\n c.SetLogy(0)\n c.SetLogx(0)\n\n\n<function token>\n\n\ndef compareMass(temp, mDark, decay, histname):\n mMeds = []\n mMeds.append(125)\n mMeds.append(400)\n mMeds.append(750)\n mMeds.append(1000)\n hists = []\n labels = []\n for mMed in mMeds:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV, %s' % (mMed, decay_label(decay))\n labels.append(label)\n hists.append(getQCD(histname))\n labels.append('QCD, H_{T}>1 TeV')\n compare1D(hists, labels, 'compare_mMed/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n if histname == 'h_pf_ntracks':\n makeROC(hists, labels, 'roc_curve/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n\n\ndef adjust(hist):\n name = hist.GetName()\n if 'ntracks' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'npfs' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'nneutrals' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n hist.GetXaxis().SetTitle('n neutrals')\n return\n\n\n<function token>\n\n\ndef get1D(mMed, mDark, temp, decay, histname):\n filename = 'outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root'.format(\n mMed, mDark, temp, decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n return hist\n\n\n<function token>\n<function token>\n\n\ndef makeROC(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n hbkg = hist\n ymax = 0\n mgraph = ROOT.TMultiGraph()\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n continue\n eff_sig = []\n eff_bkg = []\n err = []\n for b in range(1, hist.GetNbinsX() + 1):\n eff_sig.append(hist.Integral(b, -1))\n eff_bkg.append(hbkg.Integral(b, -1))\n err.append(1e-09)\n graph = ROOT.TGraphErrors(len(err), array('d', eff_sig), array('d',\n eff_bkg), array('d', err), array('d', err))\n graph.SetLineColor(colors[i])\n mgraph.Add(graph)\n leg.AddEntry(graph, labels[i], 'l')\n mgraph.SetTitle(';sig eff;bkg eff')\n mgraph.Draw('AELP')\n mgraph.GetYaxis().SetRangeUser(1e-08, 1)\n leg.Draw()\n c.SetLogy(1)\n c.SetLogx(0)\n c.Print('plots/{}.png'.format(filename))\n c.SetLogy(0)\n c.SetLogx(0)\n\n\n<function token>\n\n\ndef compareMass(temp, mDark, decay, histname):\n mMeds = []\n mMeds.append(125)\n mMeds.append(400)\n mMeds.append(750)\n mMeds.append(1000)\n hists = []\n labels = []\n for mMed in mMeds:\n hists.append(get1D(mMed, mDark, temp, decay, histname))\n label = 'm_{S}=%i GeV, %s' % (mMed, decay_label(decay))\n labels.append(label)\n hists.append(getQCD(histname))\n labels.append('QCD, H_{T}>1 TeV')\n compare1D(hists, labels, 'compare_mMed/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n if histname == 'h_pf_ntracks':\n makeROC(hists, labels, 'roc_curve/temp{}_mDark{}_decay_{}_{}'.\n format(temp, mDark, decay, histname))\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n\n\ndef adjust(hist):\n name = hist.GetName()\n if 'ntracks' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'npfs' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n if 'nneutrals' in name:\n hist.GetXaxis().SetRangeUser(0, 600)\n hist.GetXaxis().SetTitle('n neutrals')\n return\n\n\n<function token>\n\n\ndef get1D(mMed, mDark, temp, decay, histname):\n filename = 'outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root'.format(\n mMed, mDark, temp, decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n return hist\n\n\n<function token>\n<function token>\n\n\ndef makeROC(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n hbkg = hist\n ymax = 0\n mgraph = ROOT.TMultiGraph()\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n continue\n eff_sig = []\n eff_bkg = []\n err = []\n for b in range(1, hist.GetNbinsX() + 1):\n eff_sig.append(hist.Integral(b, -1))\n eff_bkg.append(hbkg.Integral(b, -1))\n err.append(1e-09)\n graph = ROOT.TGraphErrors(len(err), array('d', eff_sig), array('d',\n eff_bkg), array('d', err), array('d', err))\n graph.SetLineColor(colors[i])\n mgraph.Add(graph)\n leg.AddEntry(graph, labels[i], 'l')\n mgraph.SetTitle(';sig eff;bkg eff')\n mgraph.Draw('AELP')\n mgraph.GetYaxis().SetRangeUser(1e-08, 1)\n leg.Draw()\n c.SetLogy(1)\n c.SetLogx(0)\n c.Print('plots/{}.png'.format(filename))\n c.SetLogy(0)\n c.SetLogx(0)\n\n\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<function token>\n\n\ndef get1D(mMed, mDark, temp, decay, histname):\n filename = 'outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root'.format(\n mMed, mDark, temp, decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n return hist\n\n\n<function token>\n<function token>\n\n\ndef makeROC(hists, labels, filename):\n c = ROOT.TCanvas(filename, '', 800, 800)\n dy = 0.05 * len(hists)\n leg = ROOT.TLegend(0.18, 0.86 - dy, 0.86, 0.86)\n leg.SetTextSize(0.04)\n leg.SetBorderSize(0)\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n hbkg = hist\n ymax = 0\n mgraph = ROOT.TMultiGraph()\n for i, hist in enumerate(hists):\n if 'QCD' in labels[i]:\n continue\n eff_sig = []\n eff_bkg = []\n err = []\n for b in range(1, hist.GetNbinsX() + 1):\n eff_sig.append(hist.Integral(b, -1))\n eff_bkg.append(hbkg.Integral(b, -1))\n err.append(1e-09)\n graph = ROOT.TGraphErrors(len(err), array('d', eff_sig), array('d',\n eff_bkg), array('d', err), array('d', err))\n graph.SetLineColor(colors[i])\n mgraph.Add(graph)\n leg.AddEntry(graph, labels[i], 'l')\n mgraph.SetTitle(';sig eff;bkg eff')\n mgraph.Draw('AELP')\n mgraph.GetYaxis().SetRangeUser(1e-08, 1)\n leg.Draw()\n c.SetLogy(1)\n c.SetLogx(0)\n c.Print('plots/{}.png'.format(filename))\n c.SetLogy(0)\n c.SetLogx(0)\n\n\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<function token>\n\n\ndef get1D(mMed, mDark, temp, decay, histname):\n filename = 'outputs/hist_mMed-{}_mDark-{}_temp-{}_decay-{}.root'.format(\n mMed, mDark, temp, decay)\n f = ROOT.TFile.Open(filename)\n hist = f.Get(histname)\n clean1D(hist)\n return hist\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
99,535 |
6761763bac39e70167f8d24d3148345889b4ed78
|
from igraph import Graph, Vertex
from typing import List, Tuple
from collections import Counter
from graph_utils import set_name
def is_empty_graph(graph: Graph):
return len(graph.es) == 0
def find_maximum(item_list: List[Tuple[int, int]]) -> List[int]:
    # return the indices of every (index, value) pair that attains the maximum value
    max_item = None
maximum_indices = []
for index, item in item_list:
if max_item is None or item > max_item:
max_item = item
maximum_indices = [index]
elif item == max_item:
maximum_indices.append(index)
return maximum_indices
def compute_neighbour_degree_frequency(degree_per_vector: List[int], vertex: Vertex):
degree_vector_for_vertex = [degree_per_vector[neighbour.index] for neighbour in vertex.neighbors()]
return Counter(degree_vector_for_vertex)
def select_vertices(graph: Graph) -> int:
    # Greedy pick: favour the vertex with the most degree-1 neighbours,
    # breaking ties by counting degree-2, degree-3, ... neighbours in turn;
    # returns the chosen vertex's 'name' attribute.
degree_per_vector = [v.degree() for v in graph.vs]
counter_per_vertex = [compute_neighbour_degree_frequency(degree_per_vector, vertex) for vertex in graph.vs]
degree = 1
index_of_vertex_with_most_neighbours_of_x_degree = None
scan_only_indices = [i for i in range(len(graph.vs))]
while index_of_vertex_with_most_neighbours_of_x_degree is None:
how_many_neighbours_of_x_degree_per_vertex = \
[(index, counter_per_vertex[index][degree]) for index in scan_only_indices]
maximum_indices = find_maximum(how_many_neighbours_of_x_degree_per_vertex)
if len(maximum_indices) > 1:
scan_only_indices = maximum_indices
degree = degree + 1
if degree > len(graph.vs): # if degree > vertex number, just take the first one
index_of_vertex_with_most_neighbours_of_x_degree = maximum_indices[0]
else:
index_of_vertex_with_most_neighbours_of_x_degree = maximum_indices[0]
return graph.vs[index_of_vertex_with_most_neighbours_of_x_degree]['name']
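# Worked example (illustrative, assuming set_name labels vertices by their
# index): in the path graph 0-1-2-3, vertices 1 and 2 each have one degree-1
# neighbour and stay tied at every higher degree, so once the probed degree
# exceeds the vertex count the scan falls back to the first tied vertex and
# select_vertices returns the name of vertex 1.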
def zero_vertices(graph: Graph, selected_vertices: List[int]):
    # delete the vertices with the given names, together with their incident edges
    selected_set = graph.vs.select(name_in=selected_vertices)
graph.delete_vertices(selected_set)
def remove_vertex_and_neighbors(graph: Graph, v: Vertex):
graph.delete_vertices([v.index] + [ve.index for ve in v.neighbors()])
def most_neighbors_with_minimal_degree_algo(_, orig: Graph):
    # greedy vertex cover: repeatedly pick a vertex via select_vertices and
    # delete it (covering its incident edges) until no edges remain
    cover_group = []
graph: Graph = orig.copy()
set_name(graph)
while not is_empty_graph(graph):
selected_vertex = select_vertices(graph)
zero_vertices(graph, [selected_vertex])
cover_group = cover_group + [selected_vertex]
return cover_group
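# Usage sketch (hypothetical; assumes graph_utils.set_name assigns each
# vertex its initial index as the 'name' attribute, so the returned cover
# can be checked directly against the original graph's edges):
if __name__ == '__main__':
    g = Graph(edges=[(0, 1), (1, 2), (2, 3), (3, 0), (1, 3)])
    cover = set(most_neighbors_with_minimal_degree_algo(None, g))
    # a vertex cover must touch every edge at least once
    assert all(e.source in cover or e.target in cover for e in g.es)
    print('vertex cover:', sorted(cover))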
|
[
"from igraph import Graph, Vertex\nfrom typing import List, Tuple\nfrom collections import Counter\n\nfrom graph_utils import set_name\n\n\ndef is_empty_graph(graph: Graph):\n return len(graph.es) == 0\n\n\ndef find_maximum(item_list: List[Tuple[int, int]]) -> List[int]:\n max_item = None\n maximum_indices = []\n for index, item in item_list:\n if max_item is None or item > max_item:\n max_item = item\n maximum_indices = [index]\n elif item == max_item:\n maximum_indices.append(index)\n\n return maximum_indices\n\n\ndef compute_neighbour_degree_frequency(degree_per_vector: List[int], vertex: Vertex):\n degree_vector_for_vertex = [degree_per_vector[neighbour.index] for neighbour in vertex.neighbors()]\n return Counter(degree_vector_for_vertex)\n\n\ndef select_vertices(graph: Graph) -> int:\n degree_per_vector = [v.degree() for v in graph.vs]\n counter_per_vertex = [compute_neighbour_degree_frequency(degree_per_vector, vertex) for vertex in graph.vs]\n\n degree = 1\n index_of_vertex_with_most_neighbours_of_x_degree = None\n scan_only_indices = [i for i in range(len(graph.vs))]\n\n while index_of_vertex_with_most_neighbours_of_x_degree is None:\n how_many_neighbours_of_x_degree_per_vertex = \\\n [(index, counter_per_vertex[index][degree]) for index in scan_only_indices]\n\n maximum_indices = find_maximum(how_many_neighbours_of_x_degree_per_vertex)\n if len(maximum_indices) > 1:\n scan_only_indices = maximum_indices\n degree = degree + 1\n\n if degree > len(graph.vs): # if degree > vertex number, just take the first one\n index_of_vertex_with_most_neighbours_of_x_degree = maximum_indices[0]\n else:\n index_of_vertex_with_most_neighbours_of_x_degree = maximum_indices[0]\n\n return graph.vs[index_of_vertex_with_most_neighbours_of_x_degree]['name']\n\n\ndef zero_vertices(graph: Graph, selected_vertices: List[int]):\n selected_set = graph.vs.select(name_in=selected_vertices)\n graph.delete_vertices(selected_set)\n\n\ndef remove_vertex_and_neighbors(graph: Graph, v: Vertex):\n graph.delete_vertices([v.index] + [ve.index for ve in v.neighbors()])\n\n\ndef most_neighbors_with_minimal_degree_algo(_, orig: Graph):\n cover_group = []\n graph: Graph = orig.copy()\n set_name(graph)\n\n while not is_empty_graph(graph):\n selected_vertex = select_vertices(graph)\n zero_vertices(graph, [selected_vertex])\n cover_group = cover_group + [selected_vertex]\n return cover_group\n",
"from igraph import Graph, Vertex\nfrom typing import List, Tuple\nfrom collections import Counter\nfrom graph_utils import set_name\n\n\ndef is_empty_graph(graph: Graph):\n return len(graph.es) == 0\n\n\ndef find_maximum(item_list: List[Tuple[int, int]]) ->List[int]:\n max_item = None\n maximum_indices = []\n for index, item in item_list:\n if max_item is None or item > max_item:\n max_item = item\n maximum_indices = [index]\n elif item == max_item:\n maximum_indices.append(index)\n return maximum_indices\n\n\ndef compute_neighbour_degree_frequency(degree_per_vector: List[int], vertex:\n Vertex):\n degree_vector_for_vertex = [degree_per_vector[neighbour.index] for\n neighbour in vertex.neighbors()]\n return Counter(degree_vector_for_vertex)\n\n\ndef select_vertices(graph: Graph) ->int:\n degree_per_vector = [v.degree() for v in graph.vs]\n counter_per_vertex = [compute_neighbour_degree_frequency(\n degree_per_vector, vertex) for vertex in graph.vs]\n degree = 1\n index_of_vertex_with_most_neighbours_of_x_degree = None\n scan_only_indices = [i for i in range(len(graph.vs))]\n while index_of_vertex_with_most_neighbours_of_x_degree is None:\n how_many_neighbours_of_x_degree_per_vertex = [(index,\n counter_per_vertex[index][degree]) for index in scan_only_indices]\n maximum_indices = find_maximum(\n how_many_neighbours_of_x_degree_per_vertex)\n if len(maximum_indices) > 1:\n scan_only_indices = maximum_indices\n degree = degree + 1\n if degree > len(graph.vs):\n index_of_vertex_with_most_neighbours_of_x_degree = (\n maximum_indices[0])\n else:\n index_of_vertex_with_most_neighbours_of_x_degree = maximum_indices[\n 0]\n return graph.vs[index_of_vertex_with_most_neighbours_of_x_degree]['name']\n\n\ndef zero_vertices(graph: Graph, selected_vertices: List[int]):\n selected_set = graph.vs.select(name_in=selected_vertices)\n graph.delete_vertices(selected_set)\n\n\ndef remove_vertex_and_neighbors(graph: Graph, v: Vertex):\n graph.delete_vertices([v.index] + [ve.index for ve in v.neighbors()])\n\n\ndef most_neighbors_with_minimal_degree_algo(_, orig: Graph):\n cover_group = []\n graph: Graph = orig.copy()\n set_name(graph)\n while not is_empty_graph(graph):\n selected_vertex = select_vertices(graph)\n zero_vertices(graph, [selected_vertex])\n cover_group = cover_group + [selected_vertex]\n return cover_group\n",
"<import token>\n\n\ndef is_empty_graph(graph: Graph):\n return len(graph.es) == 0\n\n\ndef find_maximum(item_list: List[Tuple[int, int]]) ->List[int]:\n max_item = None\n maximum_indices = []\n for index, item in item_list:\n if max_item is None or item > max_item:\n max_item = item\n maximum_indices = [index]\n elif item == max_item:\n maximum_indices.append(index)\n return maximum_indices\n\n\ndef compute_neighbour_degree_frequency(degree_per_vector: List[int], vertex:\n Vertex):\n degree_vector_for_vertex = [degree_per_vector[neighbour.index] for\n neighbour in vertex.neighbors()]\n return Counter(degree_vector_for_vertex)\n\n\ndef select_vertices(graph: Graph) ->int:\n degree_per_vector = [v.degree() for v in graph.vs]\n counter_per_vertex = [compute_neighbour_degree_frequency(\n degree_per_vector, vertex) for vertex in graph.vs]\n degree = 1\n index_of_vertex_with_most_neighbours_of_x_degree = None\n scan_only_indices = [i for i in range(len(graph.vs))]\n while index_of_vertex_with_most_neighbours_of_x_degree is None:\n how_many_neighbours_of_x_degree_per_vertex = [(index,\n counter_per_vertex[index][degree]) for index in scan_only_indices]\n maximum_indices = find_maximum(\n how_many_neighbours_of_x_degree_per_vertex)\n if len(maximum_indices) > 1:\n scan_only_indices = maximum_indices\n degree = degree + 1\n if degree > len(graph.vs):\n index_of_vertex_with_most_neighbours_of_x_degree = (\n maximum_indices[0])\n else:\n index_of_vertex_with_most_neighbours_of_x_degree = maximum_indices[\n 0]\n return graph.vs[index_of_vertex_with_most_neighbours_of_x_degree]['name']\n\n\ndef zero_vertices(graph: Graph, selected_vertices: List[int]):\n selected_set = graph.vs.select(name_in=selected_vertices)\n graph.delete_vertices(selected_set)\n\n\ndef remove_vertex_and_neighbors(graph: Graph, v: Vertex):\n graph.delete_vertices([v.index] + [ve.index for ve in v.neighbors()])\n\n\ndef most_neighbors_with_minimal_degree_algo(_, orig: Graph):\n cover_group = []\n graph: Graph = orig.copy()\n set_name(graph)\n while not is_empty_graph(graph):\n selected_vertex = select_vertices(graph)\n zero_vertices(graph, [selected_vertex])\n cover_group = cover_group + [selected_vertex]\n return cover_group\n",
"<import token>\n\n\ndef is_empty_graph(graph: Graph):\n return len(graph.es) == 0\n\n\n<function token>\n\n\ndef compute_neighbour_degree_frequency(degree_per_vector: List[int], vertex:\n Vertex):\n degree_vector_for_vertex = [degree_per_vector[neighbour.index] for\n neighbour in vertex.neighbors()]\n return Counter(degree_vector_for_vertex)\n\n\ndef select_vertices(graph: Graph) ->int:\n degree_per_vector = [v.degree() for v in graph.vs]\n counter_per_vertex = [compute_neighbour_degree_frequency(\n degree_per_vector, vertex) for vertex in graph.vs]\n degree = 1\n index_of_vertex_with_most_neighbours_of_x_degree = None\n scan_only_indices = [i for i in range(len(graph.vs))]\n while index_of_vertex_with_most_neighbours_of_x_degree is None:\n how_many_neighbours_of_x_degree_per_vertex = [(index,\n counter_per_vertex[index][degree]) for index in scan_only_indices]\n maximum_indices = find_maximum(\n how_many_neighbours_of_x_degree_per_vertex)\n if len(maximum_indices) > 1:\n scan_only_indices = maximum_indices\n degree = degree + 1\n if degree > len(graph.vs):\n index_of_vertex_with_most_neighbours_of_x_degree = (\n maximum_indices[0])\n else:\n index_of_vertex_with_most_neighbours_of_x_degree = maximum_indices[\n 0]\n return graph.vs[index_of_vertex_with_most_neighbours_of_x_degree]['name']\n\n\ndef zero_vertices(graph: Graph, selected_vertices: List[int]):\n selected_set = graph.vs.select(name_in=selected_vertices)\n graph.delete_vertices(selected_set)\n\n\ndef remove_vertex_and_neighbors(graph: Graph, v: Vertex):\n graph.delete_vertices([v.index] + [ve.index for ve in v.neighbors()])\n\n\ndef most_neighbors_with_minimal_degree_algo(_, orig: Graph):\n cover_group = []\n graph: Graph = orig.copy()\n set_name(graph)\n while not is_empty_graph(graph):\n selected_vertex = select_vertices(graph)\n zero_vertices(graph, [selected_vertex])\n cover_group = cover_group + [selected_vertex]\n return cover_group\n",
"<import token>\n\n\ndef is_empty_graph(graph: Graph):\n return len(graph.es) == 0\n\n\n<function token>\n\n\ndef compute_neighbour_degree_frequency(degree_per_vector: List[int], vertex:\n Vertex):\n degree_vector_for_vertex = [degree_per_vector[neighbour.index] for\n neighbour in vertex.neighbors()]\n return Counter(degree_vector_for_vertex)\n\n\ndef select_vertices(graph: Graph) ->int:\n degree_per_vector = [v.degree() for v in graph.vs]\n counter_per_vertex = [compute_neighbour_degree_frequency(\n degree_per_vector, vertex) for vertex in graph.vs]\n degree = 1\n index_of_vertex_with_most_neighbours_of_x_degree = None\n scan_only_indices = [i for i in range(len(graph.vs))]\n while index_of_vertex_with_most_neighbours_of_x_degree is None:\n how_many_neighbours_of_x_degree_per_vertex = [(index,\n counter_per_vertex[index][degree]) for index in scan_only_indices]\n maximum_indices = find_maximum(\n how_many_neighbours_of_x_degree_per_vertex)\n if len(maximum_indices) > 1:\n scan_only_indices = maximum_indices\n degree = degree + 1\n if degree > len(graph.vs):\n index_of_vertex_with_most_neighbours_of_x_degree = (\n maximum_indices[0])\n else:\n index_of_vertex_with_most_neighbours_of_x_degree = maximum_indices[\n 0]\n return graph.vs[index_of_vertex_with_most_neighbours_of_x_degree]['name']\n\n\ndef zero_vertices(graph: Graph, selected_vertices: List[int]):\n selected_set = graph.vs.select(name_in=selected_vertices)\n graph.delete_vertices(selected_set)\n\n\n<function token>\n\n\ndef most_neighbors_with_minimal_degree_algo(_, orig: Graph):\n cover_group = []\n graph: Graph = orig.copy()\n set_name(graph)\n while not is_empty_graph(graph):\n selected_vertex = select_vertices(graph)\n zero_vertices(graph, [selected_vertex])\n cover_group = cover_group + [selected_vertex]\n return cover_group\n",
"<import token>\n\n\ndef is_empty_graph(graph: Graph):\n return len(graph.es) == 0\n\n\n<function token>\n\n\ndef compute_neighbour_degree_frequency(degree_per_vector: List[int], vertex:\n Vertex):\n degree_vector_for_vertex = [degree_per_vector[neighbour.index] for\n neighbour in vertex.neighbors()]\n return Counter(degree_vector_for_vertex)\n\n\ndef select_vertices(graph: Graph) ->int:\n degree_per_vector = [v.degree() for v in graph.vs]\n counter_per_vertex = [compute_neighbour_degree_frequency(\n degree_per_vector, vertex) for vertex in graph.vs]\n degree = 1\n index_of_vertex_with_most_neighbours_of_x_degree = None\n scan_only_indices = [i for i in range(len(graph.vs))]\n while index_of_vertex_with_most_neighbours_of_x_degree is None:\n how_many_neighbours_of_x_degree_per_vertex = [(index,\n counter_per_vertex[index][degree]) for index in scan_only_indices]\n maximum_indices = find_maximum(\n how_many_neighbours_of_x_degree_per_vertex)\n if len(maximum_indices) > 1:\n scan_only_indices = maximum_indices\n degree = degree + 1\n if degree > len(graph.vs):\n index_of_vertex_with_most_neighbours_of_x_degree = (\n maximum_indices[0])\n else:\n index_of_vertex_with_most_neighbours_of_x_degree = maximum_indices[\n 0]\n return graph.vs[index_of_vertex_with_most_neighbours_of_x_degree]['name']\n\n\ndef zero_vertices(graph: Graph, selected_vertices: List[int]):\n selected_set = graph.vs.select(name_in=selected_vertices)\n graph.delete_vertices(selected_set)\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef is_empty_graph(graph: Graph):\n return len(graph.es) == 0\n\n\n<function token>\n<function token>\n\n\ndef select_vertices(graph: Graph) ->int:\n degree_per_vector = [v.degree() for v in graph.vs]\n counter_per_vertex = [compute_neighbour_degree_frequency(\n degree_per_vector, vertex) for vertex in graph.vs]\n degree = 1\n index_of_vertex_with_most_neighbours_of_x_degree = None\n scan_only_indices = [i for i in range(len(graph.vs))]\n while index_of_vertex_with_most_neighbours_of_x_degree is None:\n how_many_neighbours_of_x_degree_per_vertex = [(index,\n counter_per_vertex[index][degree]) for index in scan_only_indices]\n maximum_indices = find_maximum(\n how_many_neighbours_of_x_degree_per_vertex)\n if len(maximum_indices) > 1:\n scan_only_indices = maximum_indices\n degree = degree + 1\n if degree > len(graph.vs):\n index_of_vertex_with_most_neighbours_of_x_degree = (\n maximum_indices[0])\n else:\n index_of_vertex_with_most_neighbours_of_x_degree = maximum_indices[\n 0]\n return graph.vs[index_of_vertex_with_most_neighbours_of_x_degree]['name']\n\n\ndef zero_vertices(graph: Graph, selected_vertices: List[int]):\n selected_set = graph.vs.select(name_in=selected_vertices)\n graph.delete_vertices(selected_set)\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef is_empty_graph(graph: Graph):\n return len(graph.es) == 0\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef zero_vertices(graph: Graph, selected_vertices: List[int]):\n selected_set = graph.vs.select(name_in=selected_vertices)\n graph.delete_vertices(selected_set)\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef is_empty_graph(graph: Graph):\n return len(graph.es) == 0\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,536 |
e0428a6b72fef4f3c6a52a6c34b30df1dd23f4ed
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from __future__ import print_function
from __future__ import absolute_import
import os
import os.path as osp
import PIL.Image  # plain "import PIL" does not load the Image submodule
root_dir = osp.join(osp.dirname(__file__), '..')
data_dir = osp.join(root_dir, 'dataset/data')
class imdb(object):
def __init__(self, name):
self._name = name
self._classes = []
self._image_index = []
self._roidb = None
self._roidb_handler = self.default_roidb
@property
def name(self):
return self._name
@property
def classes(self):
return self._classes
@property
def image_index(self):
return self._image_index
@property
def num_classes(self):
return len(self._classes)
@property
def roidb_handler(self):
return self._roidb_handler
@roidb_handler.setter
def roidb_handler(self, val):
self._roidb_handler = val
@property
def roidb(self):
if self._roidb is not None:
return self._roidb
self._roidb = self.roidb_handler()
return self._roidb
def default_roidb(self):
raise NotImplementedError
def _get_widths(self):
return [PIL.Image.open(self.image_path_at(i)).size[0]
for i in range(self.num_images)]
    def append_flipped_images(self):
        # double the dataset by adding a horizontally flipped copy of
        # every image's annotations
        num_images = self.num_images
        widths = self._get_widths()
        for i in range(num_images):
            boxes = self.roidb[i]['boxes'].copy()
            oldx1 = boxes[:, 0].copy()
            oldx2 = boxes[:, 2].copy()
            # mirror the box x-coordinates about the vertical image centre
            boxes[:, 0] = widths[i] - oldx2 - 1
            boxes[:, 2] = widths[i] - oldx1 - 1
            assert (boxes[:, 2] >= boxes[:, 0]).all()
entry = {'boxes': boxes,
'gt_overlaps': self.roidb[i]['gt_overlaps'],
'gt_classes': self.roidb[i]['gt_classes'],
'flipped': True}
self.roidb.append(entry)
self._image_index = self._image_index * 2
@property
def num_images(self):
return len(self._image_index)
def image_path_at(self, i):
raise NotImplementedError
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(data_dir, 'cache'))
if not osp.exists(cache_path):
os.makedirs(cache_path)
return cache_path
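# Minimal subclass sketch (hypothetical `toy` dataset, not part of the
# original file): a concrete dataset only needs to supply an image index,
# per-image paths and a roidb handler on top of this base class.
class toy_imdb(imdb):
    def __init__(self, image_paths, roidb):
        imdb.__init__(self, 'toy')
        self._image_index = list(range(len(image_paths)))
        self._paths = image_paths
        self.roidb_handler = lambda: roidb

    def image_path_at(self, i):
        return self._paths[i]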
|
[
"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport os.path as osp\nimport PIL\n\nroot_dir = osp.join(osp.dirname(__file__), '..')\ndata_dir = osp.join(root_dir, 'dataset/data')\n\nclass imdb(object):\n def __init__(self, name):\n self._name = name\n self._classes = []\n self._image_index = []\n self._roidb = None\n self._roidb_handler = self.default_roidb\n\n @property\n def name(self):\n return self._name\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def image_index(self):\n return self._image_index\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n\n @roidb_handler.setter\n def roidb_handler(self, val):\n self._roidb_handler = val\n\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0]\n for i in range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes,\n 'gt_overlaps': self.roidb[i]['gt_overlaps'],\n 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(data_dir, 'cache'))\n if not osp.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n\n\n\n\n\n\n\n\n\n\n\n\n",
"from __future__ import print_function\nfrom __future__ import absolute_import\nimport os\nimport os.path as osp\nimport PIL\nroot_dir = osp.join(osp.dirname(__file__), '..')\ndata_dir = osp.join(root_dir, 'dataset/data')\n\n\nclass imdb(object):\n\n def __init__(self, name):\n self._name = name\n self._classes = []\n self._image_index = []\n self._roidb = None\n self._roidb_handler = self.default_roidb\n\n @property\n def name(self):\n return self._name\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def image_index(self):\n return self._image_index\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n\n @roidb_handler.setter\n def roidb_handler(self, val):\n self._roidb_handler = val\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0] for i in\n range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(data_dir, 'cache'))\n if not osp.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n",
"<import token>\nroot_dir = osp.join(osp.dirname(__file__), '..')\ndata_dir = osp.join(root_dir, 'dataset/data')\n\n\nclass imdb(object):\n\n def __init__(self, name):\n self._name = name\n self._classes = []\n self._image_index = []\n self._roidb = None\n self._roidb_handler = self.default_roidb\n\n @property\n def name(self):\n return self._name\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def image_index(self):\n return self._image_index\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n\n @roidb_handler.setter\n def roidb_handler(self, val):\n self._roidb_handler = val\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0] for i in\n range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(data_dir, 'cache'))\n if not osp.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n\n def __init__(self, name):\n self._name = name\n self._classes = []\n self._image_index = []\n self._roidb = None\n self._roidb_handler = self.default_roidb\n\n @property\n def name(self):\n return self._name\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def image_index(self):\n return self._image_index\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n\n @roidb_handler.setter\n def roidb_handler(self, val):\n self._roidb_handler = val\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0] for i in\n range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(data_dir, 'cache'))\n if not osp.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n\n def __init__(self, name):\n self._name = name\n self._classes = []\n self._image_index = []\n self._roidb = None\n self._roidb_handler = self.default_roidb\n\n @property\n def name(self):\n return self._name\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def image_index(self):\n return self._image_index\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n <function token>\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0] for i in\n range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(data_dir, 'cache'))\n if not osp.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n\n def __init__(self, name):\n self._name = name\n self._classes = []\n self._image_index = []\n self._roidb = None\n self._roidb_handler = self.default_roidb\n\n @property\n def name(self):\n return self._name\n\n @property\n def classes(self):\n return self._classes\n <function token>\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n <function token>\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0] for i in\n range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(data_dir, 'cache'))\n if not osp.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n\n def __init__(self, name):\n self._name = name\n self._classes = []\n self._image_index = []\n self._roidb = None\n self._roidb_handler = self.default_roidb\n\n @property\n def name(self):\n return self._name\n <function token>\n <function token>\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n <function token>\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0] for i in\n range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(data_dir, 'cache'))\n if not osp.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n <function token>\n\n @property\n def name(self):\n return self._name\n <function token>\n <function token>\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n <function token>\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0] for i in\n range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(data_dir, 'cache'))\n if not osp.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n <function token>\n\n @property\n def name(self):\n return self._name\n <function token>\n <function token>\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n <function token>\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0] for i in\n range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n <function token>\n\n @property\n def name(self):\n return self._name\n <function token>\n <function token>\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n <function token>\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0] for i in\n range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n <function token>\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n <function token>\n\n @property\n def name(self):\n return self._name\n <function token>\n <function token>\n <function token>\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n <function token>\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0] for i in\n range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n <function token>\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n <function token>\n\n @property\n def name(self):\n return self._name\n <function token>\n <function token>\n <function token>\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n <function token>\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0] for i in\n range(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n <function token>\n\n @property\n def name(self):\n return self._name\n <function token>\n <function token>\n <function token>\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n <function token>\n\n @property\n def roidb(self):\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n def default_roidb(self):\n raise NotImplementedError\n <function token>\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n <function token>\n\n @property\n def name(self):\n return self._name\n <function token>\n <function token>\n <function token>\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n <function token>\n <function token>\n\n def default_roidb(self):\n raise NotImplementedError\n <function token>\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n <function token>\n\n @property\n def name(self):\n return self._name\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def default_roidb(self):\n raise NotImplementedError\n <function token>\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in range(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes': boxes, 'gt_overlaps': self.roidb[i][\n 'gt_overlaps'], 'gt_classes': self.roidb[i]['gt_classes'],\n 'flipped': True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n @property\n def num_images(self):\n return len(self._image_index)\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n <function token>\n\n @property\n def name(self):\n return self._name\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def default_roidb(self):\n raise NotImplementedError\n <function token>\n <function token>\n\n @property\n def num_images(self):\n return len(self._image_index)\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n <function token>\n\n @property\n def name(self):\n return self._name\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def num_images(self):\n return len(self._image_index)\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def num_images(self):\n return len(self._image_index)\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass imdb(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
99,537 |
4066a1bd7f5383f008dd937252b6ad57f050a628
|
#!/usr/bin/env python
# encoding: utf-8
"""
get_snapshot.py
Fetch project from synergy and store on disc
Created by Aske Olsson 2011-09-22.
Copyright (c) 2011 Aske Olsson. All rights reserved.
"""
from ccm_objects_in_project import get_objects_in_project
import os
def get_snapshot(project, ccm, outdir):
if not outdir.endswith('/'):
outdir += '/'
# get all objects in the project
objects = get_objects_in_project(project, ccm)
# write the objects to outdir
    for object, paths in objects.items():
        if ':dir:' not in object and ':project:' not in object:
            content = ccm.cat(object).run()
            for path in paths:
                p = outdir + path
                dir = os.path.split(p)[0]
                if not os.path.exists(dir):
                    os.makedirs(dir)
                print("Writing %s to %s" % (object, p))
                with open(p, 'wb') as f:
                    f.write(content)
# handle empty dirs by adding .gitignore to empty leaf dirs
empty_dirs = get_empty_dirs(objects)
write_empty_dirs(empty_dirs, outdir)
def write_empty_dirs(dirs, outdir):
for dir in dirs:
path = os.path.join(outdir, dir)
filepath = os.path.join(path, '.gitignore')
if not os.path.exists(path):
os.makedirs(path)
print(("Writing empty .gitignore to %s" %filepath))
f = open(filepath, 'wb')
f.write('')
f.close()
def get_empty_dirs(objects):
    dirs = [d for o, paths in objects.items() for d in paths if ':dir:' in o]
    file_dirs = [d.rsplit('/', 1)[0] for o, paths in objects.items()
                 for d in paths if ':dir:' not in o and ':project:' not in o]
leaf_dirs = get_leaf_dirs(dirs)
empty_leaves = set(leaf_dirs) - set(file_dirs)
return empty_leaves
def get_leaf_dirs(dirs):
    # a dir is a leaf if no other dir in the set is nested beneath it,
    # e.g. get_leaf_dirs(['a', 'a/b', 'c']) -> ['a/b', 'c']; scanning only
    # the sorted predecessor drops leaves that follow an unrelated subtree
    return [d for d in sorted(dirs)
            if not any(other != d and other.startswith(d + '/') for other in dirs)]
def main():
pass
if __name__ == '__main__':
main()
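# Usage sketch (hypothetical; the session class and project spec below are
# illustrative only; any object whose cat(...).run() returns file contents
# will do):
#
#   ccm = SomeSynergySession(database='/path/to/ccmdb')   # hypothetical
#   get_snapshot('myproject~1.0:project:1', ccm, '/tmp/myproject-1.0')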
|
[
"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nget_snapshot.py\n\nFetch project from synergy and store on disc\n\nCreated by Aske Olsson 2011-09-22.\nCopyright (c) 2011 Aske Olsson. All rights reserved.\n\"\"\"\n\nfrom ccm_objects_in_project import get_objects_in_project\nimport os\n\ndef get_snapshot(project, ccm, outdir):\n if not outdir.endswith('/'):\n outdir += '/'\n # get all objects in the project\n objects = get_objects_in_project(project, ccm)\n\n # write the objects to outdir\n for object, paths in objects.items():\n# print(object, paths)\n if not ':dir:' in object and not ':project:' in object:\n content = ccm.cat(object).run()\n for path in paths:\n p = outdir + path\n dir = os.path.split(p)[0]\n if not os.path.exists(dir):\n os.makedirs(dir)\n print((\"Writing %s to %s\" %(object, p)))\n f = open(p, 'wb')\n f.write(content)\n f.close()\n\n # handle empty dirs by adding .gitignore to empty leaf dirs\n empty_dirs = get_empty_dirs(objects)\n write_empty_dirs(empty_dirs, outdir)\n\ndef write_empty_dirs(dirs, outdir):\n for dir in dirs:\n path = os.path.join(outdir, dir)\n filepath = os.path.join(path, '.gitignore')\n if not os.path.exists(path):\n os.makedirs(path)\n print((\"Writing empty .gitignore to %s\" %filepath))\n f = open(filepath, 'wb')\n f.write('')\n f.close()\n\ndef get_empty_dirs(objects):\n dirs = [d for o, paths in objects.items() for d in paths if ':dir:' in o]\n file_dirs = [d.rsplit('/',1)[0] for o, paths in objects.items() for d in paths if ':dir:' not in o and ':project:' not in o]\n leaf_dirs = get_leaf_dirs(dirs)\n empty_leaves = set(leaf_dirs) - set(file_dirs)\n return empty_leaves\n\ndef get_leaf_dirs(dirs):\n res = [sorted(dirs)[0]]\n previous = res[0]\n for dir in sorted(dirs):\n if previous in dir:\n res.remove(previous)\n res.append(dir)\n previous = dir\n return res\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\nfrom ccm_objects_in_project import get_objects_in_project\nimport os\n\n\ndef get_snapshot(project, ccm, outdir):\n if not outdir.endswith('/'):\n outdir += '/'\n objects = get_objects_in_project(project, ccm)\n for object, paths in objects.items():\n if not ':dir:' in object and not ':project:' in object:\n content = ccm.cat(object).run()\n for path in paths:\n p = outdir + path\n dir = os.path.split(p)[0]\n if not os.path.exists(dir):\n os.makedirs(dir)\n print('Writing %s to %s' % (object, p))\n f = open(p, 'wb')\n f.write(content)\n f.close()\n empty_dirs = get_empty_dirs(objects)\n write_empty_dirs(empty_dirs, outdir)\n\n\ndef write_empty_dirs(dirs, outdir):\n for dir in dirs:\n path = os.path.join(outdir, dir)\n filepath = os.path.join(path, '.gitignore')\n if not os.path.exists(path):\n os.makedirs(path)\n print('Writing empty .gitignore to %s' % filepath)\n f = open(filepath, 'wb')\n f.write('')\n f.close()\n\n\ndef get_empty_dirs(objects):\n dirs = [d for o, paths in objects.items() for d in paths if ':dir:' in o]\n file_dirs = [d.rsplit('/', 1)[0] for o, paths in objects.items() for d in\n paths if ':dir:' not in o and ':project:' not in o]\n leaf_dirs = get_leaf_dirs(dirs)\n empty_leaves = set(leaf_dirs) - set(file_dirs)\n return empty_leaves\n\n\ndef get_leaf_dirs(dirs):\n res = [sorted(dirs)[0]]\n previous = res[0]\n for dir in sorted(dirs):\n if previous in dir:\n res.remove(previous)\n res.append(dir)\n previous = dir\n return res\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\n\n\ndef get_snapshot(project, ccm, outdir):\n if not outdir.endswith('/'):\n outdir += '/'\n objects = get_objects_in_project(project, ccm)\n for object, paths in objects.items():\n if not ':dir:' in object and not ':project:' in object:\n content = ccm.cat(object).run()\n for path in paths:\n p = outdir + path\n dir = os.path.split(p)[0]\n if not os.path.exists(dir):\n os.makedirs(dir)\n print('Writing %s to %s' % (object, p))\n f = open(p, 'wb')\n f.write(content)\n f.close()\n empty_dirs = get_empty_dirs(objects)\n write_empty_dirs(empty_dirs, outdir)\n\n\ndef write_empty_dirs(dirs, outdir):\n for dir in dirs:\n path = os.path.join(outdir, dir)\n filepath = os.path.join(path, '.gitignore')\n if not os.path.exists(path):\n os.makedirs(path)\n print('Writing empty .gitignore to %s' % filepath)\n f = open(filepath, 'wb')\n f.write('')\n f.close()\n\n\ndef get_empty_dirs(objects):\n dirs = [d for o, paths in objects.items() for d in paths if ':dir:' in o]\n file_dirs = [d.rsplit('/', 1)[0] for o, paths in objects.items() for d in\n paths if ':dir:' not in o and ':project:' not in o]\n leaf_dirs = get_leaf_dirs(dirs)\n empty_leaves = set(leaf_dirs) - set(file_dirs)\n return empty_leaves\n\n\ndef get_leaf_dirs(dirs):\n res = [sorted(dirs)[0]]\n previous = res[0]\n for dir in sorted(dirs):\n if previous in dir:\n res.remove(previous)\n res.append(dir)\n previous = dir\n return res\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\n\n\ndef get_snapshot(project, ccm, outdir):\n if not outdir.endswith('/'):\n outdir += '/'\n objects = get_objects_in_project(project, ccm)\n for object, paths in objects.items():\n if not ':dir:' in object and not ':project:' in object:\n content = ccm.cat(object).run()\n for path in paths:\n p = outdir + path\n dir = os.path.split(p)[0]\n if not os.path.exists(dir):\n os.makedirs(dir)\n print('Writing %s to %s' % (object, p))\n f = open(p, 'wb')\n f.write(content)\n f.close()\n empty_dirs = get_empty_dirs(objects)\n write_empty_dirs(empty_dirs, outdir)\n\n\ndef write_empty_dirs(dirs, outdir):\n for dir in dirs:\n path = os.path.join(outdir, dir)\n filepath = os.path.join(path, '.gitignore')\n if not os.path.exists(path):\n os.makedirs(path)\n print('Writing empty .gitignore to %s' % filepath)\n f = open(filepath, 'wb')\n f.write('')\n f.close()\n\n\ndef get_empty_dirs(objects):\n dirs = [d for o, paths in objects.items() for d in paths if ':dir:' in o]\n file_dirs = [d.rsplit('/', 1)[0] for o, paths in objects.items() for d in\n paths if ':dir:' not in o and ':project:' not in o]\n leaf_dirs = get_leaf_dirs(dirs)\n empty_leaves = set(leaf_dirs) - set(file_dirs)\n return empty_leaves\n\n\ndef get_leaf_dirs(dirs):\n res = [sorted(dirs)[0]]\n previous = res[0]\n for dir in sorted(dirs):\n if previous in dir:\n res.remove(previous)\n res.append(dir)\n previous = dir\n return res\n\n\ndef main():\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef get_snapshot(project, ccm, outdir):\n if not outdir.endswith('/'):\n outdir += '/'\n objects = get_objects_in_project(project, ccm)\n for object, paths in objects.items():\n if not ':dir:' in object and not ':project:' in object:\n content = ccm.cat(object).run()\n for path in paths:\n p = outdir + path\n dir = os.path.split(p)[0]\n if not os.path.exists(dir):\n os.makedirs(dir)\n print('Writing %s to %s' % (object, p))\n f = open(p, 'wb')\n f.write(content)\n f.close()\n empty_dirs = get_empty_dirs(objects)\n write_empty_dirs(empty_dirs, outdir)\n\n\ndef write_empty_dirs(dirs, outdir):\n for dir in dirs:\n path = os.path.join(outdir, dir)\n filepath = os.path.join(path, '.gitignore')\n if not os.path.exists(path):\n os.makedirs(path)\n print('Writing empty .gitignore to %s' % filepath)\n f = open(filepath, 'wb')\n f.write('')\n f.close()\n\n\n<function token>\n\n\ndef get_leaf_dirs(dirs):\n res = [sorted(dirs)[0]]\n previous = res[0]\n for dir in sorted(dirs):\n if previous in dir:\n res.remove(previous)\n res.append(dir)\n previous = dir\n return res\n\n\ndef main():\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef get_snapshot(project, ccm, outdir):\n if not outdir.endswith('/'):\n outdir += '/'\n objects = get_objects_in_project(project, ccm)\n for object, paths in objects.items():\n if not ':dir:' in object and not ':project:' in object:\n content = ccm.cat(object).run()\n for path in paths:\n p = outdir + path\n dir = os.path.split(p)[0]\n if not os.path.exists(dir):\n os.makedirs(dir)\n print('Writing %s to %s' % (object, p))\n f = open(p, 'wb')\n f.write(content)\n f.close()\n empty_dirs = get_empty_dirs(objects)\n write_empty_dirs(empty_dirs, outdir)\n\n\n<function token>\n<function token>\n\n\ndef get_leaf_dirs(dirs):\n res = [sorted(dirs)[0]]\n previous = res[0]\n for dir in sorted(dirs):\n if previous in dir:\n res.remove(previous)\n res.append(dir)\n previous = dir\n return res\n\n\ndef main():\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_leaf_dirs(dirs):\n res = [sorted(dirs)[0]]\n previous = res[0]\n for dir in sorted(dirs):\n if previous in dir:\n res.remove(previous)\n res.append(dir)\n previous = dir\n return res\n\n\ndef main():\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_leaf_dirs(dirs):\n res = [sorted(dirs)[0]]\n previous = res[0]\n for dir in sorted(dirs):\n if previous in dir:\n res.remove(previous)\n res.append(dir)\n previous = dir\n return res\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,538 |
bd149b5e0505470f7801486f979697d0bab829f6
|
from typing import Optional, Any
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from app import crud
from app.api import deps
router = APIRouter(prefix='/question')
@router.get('/')
async def get_question(
qid: Optional[str] = None,
subject: Optional[str] = None,
is_simple: bool = True,
db: Session = Depends(deps.get_db)
) -> Any:
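    # Resolution order: an explicit question id wins over a subject filter.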
if qid:
return crud.item.get_by_id(db, qid)
if subject:
results = crud.item.get_by_subject_all(db, subject)
if not is_simple:
return results
return [
{
"question_id": result.id,
"answer": result.answer
}
for result in results
]
return 'no function'
@router.get('/random')
async def get_question_random(
subject: Optional[str] = None,
db: Session = Depends(deps.get_db)
) -> Any:
"""
get question
"""
if subject:
return crud.item.get_by_random(db, subject)
else:
return crud.item.get_by_random(db)
@router.get('/order')
async def get_question_order(
subject: str,
order: int,
db: Session = Depends(deps.get_db)
) -> Any:
return crud.item.get_by_subject_order(db, subject, order)
|
[
"from typing import Optional, Any\n\nfrom fastapi import APIRouter, Depends\nfrom sqlalchemy.orm import Session\n\nfrom app import crud\nfrom app.api import deps\n\n\nrouter = APIRouter(prefix='/question')\n\n\[email protected]('/')\nasync def get_question(\n qid: Optional[str] = None,\n subject: Optional[str] = None,\n is_simple: bool = True,\n db: Session = Depends(deps.get_db)\n) -> Any:\n if qid:\n return crud.item.get_by_id(db, qid)\n if subject:\n results = crud.item.get_by_subject_all(db, subject)\n if not is_simple:\n return results\n return [\n {\n \"question_id\": result.id,\n \"answer\": result.answer\n }\n for result in results\n ]\n return 'no function'\n\n\[email protected]('/random')\nasync def get_question_random(\n subject: Optional[str] = None,\n db: Session = Depends(deps.get_db)\n) -> Any:\n \"\"\"\n get question\n \"\"\"\n if subject:\n return crud.item.get_by_random(db, subject)\n else:\n return crud.item.get_by_random(db)\n\n\[email protected]('/order')\nasync def get_question_order(\n subject: str,\n order: int,\n db: Session = Depends(deps.get_db)\n) -> Any:\n return crud.item.get_by_subject_order(db, subject, order)\n",
"from typing import Optional, Any\nfrom fastapi import APIRouter, Depends\nfrom sqlalchemy.orm import Session\nfrom app import crud\nfrom app.api import deps\nrouter = APIRouter(prefix='/question')\n\n\[email protected]('/')\nasync def get_question(qid: Optional[str]=None, subject: Optional[str]=None,\n is_simple: bool=True, db: Session=Depends(deps.get_db)) ->Any:\n if qid:\n return crud.item.get_by_id(db, qid)\n if subject:\n results = crud.item.get_by_subject_all(db, subject)\n if not is_simple:\n return results\n return [{'question_id': result.id, 'answer': result.answer} for\n result in results]\n return 'no function'\n\n\[email protected]('/random')\nasync def get_question_random(subject: Optional[str]=None, db: Session=\n Depends(deps.get_db)) ->Any:\n \"\"\"\n get question\n \"\"\"\n if subject:\n return crud.item.get_by_random(db, subject)\n else:\n return crud.item.get_by_random(db)\n\n\[email protected]('/order')\nasync def get_question_order(subject: str, order: int, db: Session=Depends(\n deps.get_db)) ->Any:\n return crud.item.get_by_subject_order(db, subject, order)\n",
"<import token>\nrouter = APIRouter(prefix='/question')\n\n\[email protected]('/')\nasync def get_question(qid: Optional[str]=None, subject: Optional[str]=None,\n is_simple: bool=True, db: Session=Depends(deps.get_db)) ->Any:\n if qid:\n return crud.item.get_by_id(db, qid)\n if subject:\n results = crud.item.get_by_subject_all(db, subject)\n if not is_simple:\n return results\n return [{'question_id': result.id, 'answer': result.answer} for\n result in results]\n return 'no function'\n\n\[email protected]('/random')\nasync def get_question_random(subject: Optional[str]=None, db: Session=\n Depends(deps.get_db)) ->Any:\n \"\"\"\n get question\n \"\"\"\n if subject:\n return crud.item.get_by_random(db, subject)\n else:\n return crud.item.get_by_random(db)\n\n\[email protected]('/order')\nasync def get_question_order(subject: str, order: int, db: Session=Depends(\n deps.get_db)) ->Any:\n return crud.item.get_by_subject_order(db, subject, order)\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\nasync def get_question(qid: Optional[str]=None, subject: Optional[str]=None,\n is_simple: bool=True, db: Session=Depends(deps.get_db)) ->Any:\n if qid:\n return crud.item.get_by_id(db, qid)\n if subject:\n results = crud.item.get_by_subject_all(db, subject)\n if not is_simple:\n return results\n return [{'question_id': result.id, 'answer': result.answer} for\n result in results]\n return 'no function'\n\n\[email protected]('/random')\nasync def get_question_random(subject: Optional[str]=None, db: Session=\n Depends(deps.get_db)) ->Any:\n \"\"\"\n get question\n \"\"\"\n if subject:\n return crud.item.get_by_random(db, subject)\n else:\n return crud.item.get_by_random(db)\n\n\[email protected]('/order')\nasync def get_question_order(subject: str, order: int, db: Session=Depends(\n deps.get_db)) ->Any:\n return crud.item.get_by_subject_order(db, subject, order)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
99,539 |
c7b2850666e4157835580b46256dd1e2e9b63693
|
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from src.pages.base_page import BasePage
class DashboardPage(BasePage):
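    # Locator tuples for the JIRA top navigation bar and its submenus.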
DASHBOARD_BUTTON = (By.ID, "home_link")
PROJECTS_BUTTON = (By.ID, "browse_link")
ISSUES_BUTTON = (By.ID, "find_link")
CREATE_BUTTON = (By.ID, "create_link")
QUICK_SEARCH_FIELD = (By.ID, "quickSearchInput")
PROFILE_LOGO = (By.ID, "header-details-user-fullname")
CURRENT_SEARCH_ISSUES_SUBMENU_OPTION = (By.ID, "jira.top.navigation.bar:issues_drop_current_lnk")
SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION = (By.ID, "issues_new_search_link")
VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION = (By.ID, "dash_lnk_system_lnk")
def __init__(self, driver):
self.driver = driver
def at_page(self):
return ("System Dashboard - Hillel IT School JIRA" in self.driver.title) & (self.is_element_visible(self.PROFILE_LOGO))
def open_create_issue_page(self):
self.is_element_visible(self.CREATE_BUTTON)
self.driver.find_element(*self.CREATE_BUTTON).click()
def open_search_issues_page(self):
self.is_element_visible(self.ISSUES_BUTTON)
self.driver.find_element(*self.ISSUES_BUTTON).click()
self.is_element_visible(self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION)
self.driver.find_element(*self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION).click()
def open_dashboard_page(self):
self.is_element_visible(self.DASHBOARD_BUTTON)
self.driver.find_element(*self.DASHBOARD_BUTTON).click()
self.is_element_visible(self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION)
self.driver.find_element(*self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION).click()
def quick_search(self, search_text):
self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(search_text)
self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER)
def open_issue(self, issue_id):
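        # A quick search for an exact issue key opens that issue directly.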
self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(issue_id)
self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER)
|
[
"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\nfrom src.pages.base_page import BasePage\n\n\nclass DashboardPage(BasePage):\n\n DASHBOARD_BUTTON = (By.ID, \"home_link\")\n PROJECTS_BUTTON = (By.ID, \"browse_link\")\n ISSUES_BUTTON = (By.ID, \"find_link\")\n CREATE_BUTTON = (By.ID, \"create_link\")\n QUICK_SEARCH_FIELD = (By.ID, \"quickSearchInput\")\n PROFILE_LOGO = (By.ID, \"header-details-user-fullname\")\n CURRENT_SEARCH_ISSUES_SUBMENU_OPTION = (By.ID, \"jira.top.navigation.bar:issues_drop_current_lnk\")\n SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION = (By.ID, \"issues_new_search_link\")\n VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION = (By.ID, \"dash_lnk_system_lnk\")\n\n def __init__(self, driver):\n self.driver = driver\n\n def at_page(self):\n return (\"System Dashboard - Hillel IT School JIRA\" in self.driver.title) & (self.is_element_visible(self.PROFILE_LOGO))\n\n def open_create_issue_page(self):\n self.is_element_visible(self.CREATE_BUTTON)\n self.driver.find_element(*self.CREATE_BUTTON).click()\n\n def open_search_issues_page(self):\n self.is_element_visible(self.ISSUES_BUTTON)\n self.driver.find_element(*self.ISSUES_BUTTON).click()\n self.is_element_visible(self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION)\n self.driver.find_element(*self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION).click()\n\n def open_dashboard_page(self):\n self.is_element_visible(self.DASHBOARD_BUTTON)\n self.driver.find_element(*self.DASHBOARD_BUTTON).click()\n self.is_element_visible(self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION)\n self.driver.find_element(*self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION).click()\n\n def quick_search(self, search_text):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(search_text)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER)\n\n def open_issue(self, issue_id):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(issue_id)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER)\n",
"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom src.pages.base_page import BasePage\n\n\nclass DashboardPage(BasePage):\n DASHBOARD_BUTTON = By.ID, 'home_link'\n PROJECTS_BUTTON = By.ID, 'browse_link'\n ISSUES_BUTTON = By.ID, 'find_link'\n CREATE_BUTTON = By.ID, 'create_link'\n QUICK_SEARCH_FIELD = By.ID, 'quickSearchInput'\n PROFILE_LOGO = By.ID, 'header-details-user-fullname'\n CURRENT_SEARCH_ISSUES_SUBMENU_OPTION = (By.ID,\n 'jira.top.navigation.bar:issues_drop_current_lnk')\n SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION = By.ID, 'issues_new_search_link'\n VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION = By.ID, 'dash_lnk_system_lnk'\n\n def __init__(self, driver):\n self.driver = driver\n\n def at_page(self):\n return ('System Dashboard - Hillel IT School JIRA' in self.driver.title\n ) & self.is_element_visible(self.PROFILE_LOGO)\n\n def open_create_issue_page(self):\n self.is_element_visible(self.CREATE_BUTTON)\n self.driver.find_element(*self.CREATE_BUTTON).click()\n\n def open_search_issues_page(self):\n self.is_element_visible(self.ISSUES_BUTTON)\n self.driver.find_element(*self.ISSUES_BUTTON).click()\n self.is_element_visible(self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION)\n self.driver.find_element(*self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION\n ).click()\n\n def open_dashboard_page(self):\n self.is_element_visible(self.DASHBOARD_BUTTON)\n self.driver.find_element(*self.DASHBOARD_BUTTON).click()\n self.is_element_visible(self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION)\n self.driver.find_element(*self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION\n ).click()\n\n def quick_search(self, search_text):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(\n search_text)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER\n )\n\n def open_issue(self, issue_id):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(issue_id)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER\n )\n",
"<import token>\n\n\nclass DashboardPage(BasePage):\n DASHBOARD_BUTTON = By.ID, 'home_link'\n PROJECTS_BUTTON = By.ID, 'browse_link'\n ISSUES_BUTTON = By.ID, 'find_link'\n CREATE_BUTTON = By.ID, 'create_link'\n QUICK_SEARCH_FIELD = By.ID, 'quickSearchInput'\n PROFILE_LOGO = By.ID, 'header-details-user-fullname'\n CURRENT_SEARCH_ISSUES_SUBMENU_OPTION = (By.ID,\n 'jira.top.navigation.bar:issues_drop_current_lnk')\n SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION = By.ID, 'issues_new_search_link'\n VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION = By.ID, 'dash_lnk_system_lnk'\n\n def __init__(self, driver):\n self.driver = driver\n\n def at_page(self):\n return ('System Dashboard - Hillel IT School JIRA' in self.driver.title\n ) & self.is_element_visible(self.PROFILE_LOGO)\n\n def open_create_issue_page(self):\n self.is_element_visible(self.CREATE_BUTTON)\n self.driver.find_element(*self.CREATE_BUTTON).click()\n\n def open_search_issues_page(self):\n self.is_element_visible(self.ISSUES_BUTTON)\n self.driver.find_element(*self.ISSUES_BUTTON).click()\n self.is_element_visible(self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION)\n self.driver.find_element(*self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION\n ).click()\n\n def open_dashboard_page(self):\n self.is_element_visible(self.DASHBOARD_BUTTON)\n self.driver.find_element(*self.DASHBOARD_BUTTON).click()\n self.is_element_visible(self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION)\n self.driver.find_element(*self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION\n ).click()\n\n def quick_search(self, search_text):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(\n search_text)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER\n )\n\n def open_issue(self, issue_id):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(issue_id)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER\n )\n",
"<import token>\n\n\nclass DashboardPage(BasePage):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, driver):\n self.driver = driver\n\n def at_page(self):\n return ('System Dashboard - Hillel IT School JIRA' in self.driver.title\n ) & self.is_element_visible(self.PROFILE_LOGO)\n\n def open_create_issue_page(self):\n self.is_element_visible(self.CREATE_BUTTON)\n self.driver.find_element(*self.CREATE_BUTTON).click()\n\n def open_search_issues_page(self):\n self.is_element_visible(self.ISSUES_BUTTON)\n self.driver.find_element(*self.ISSUES_BUTTON).click()\n self.is_element_visible(self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION)\n self.driver.find_element(*self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION\n ).click()\n\n def open_dashboard_page(self):\n self.is_element_visible(self.DASHBOARD_BUTTON)\n self.driver.find_element(*self.DASHBOARD_BUTTON).click()\n self.is_element_visible(self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION)\n self.driver.find_element(*self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION\n ).click()\n\n def quick_search(self, search_text):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(\n search_text)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER\n )\n\n def open_issue(self, issue_id):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(issue_id)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER\n )\n",
"<import token>\n\n\nclass DashboardPage(BasePage):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def at_page(self):\n return ('System Dashboard - Hillel IT School JIRA' in self.driver.title\n ) & self.is_element_visible(self.PROFILE_LOGO)\n\n def open_create_issue_page(self):\n self.is_element_visible(self.CREATE_BUTTON)\n self.driver.find_element(*self.CREATE_BUTTON).click()\n\n def open_search_issues_page(self):\n self.is_element_visible(self.ISSUES_BUTTON)\n self.driver.find_element(*self.ISSUES_BUTTON).click()\n self.is_element_visible(self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION)\n self.driver.find_element(*self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION\n ).click()\n\n def open_dashboard_page(self):\n self.is_element_visible(self.DASHBOARD_BUTTON)\n self.driver.find_element(*self.DASHBOARD_BUTTON).click()\n self.is_element_visible(self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION)\n self.driver.find_element(*self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION\n ).click()\n\n def quick_search(self, search_text):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(\n search_text)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER\n )\n\n def open_issue(self, issue_id):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(issue_id)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER\n )\n",
"<import token>\n\n\nclass DashboardPage(BasePage):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def at_page(self):\n return ('System Dashboard - Hillel IT School JIRA' in self.driver.title\n ) & self.is_element_visible(self.PROFILE_LOGO)\n <function token>\n\n def open_search_issues_page(self):\n self.is_element_visible(self.ISSUES_BUTTON)\n self.driver.find_element(*self.ISSUES_BUTTON).click()\n self.is_element_visible(self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION)\n self.driver.find_element(*self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION\n ).click()\n\n def open_dashboard_page(self):\n self.is_element_visible(self.DASHBOARD_BUTTON)\n self.driver.find_element(*self.DASHBOARD_BUTTON).click()\n self.is_element_visible(self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION)\n self.driver.find_element(*self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION\n ).click()\n\n def quick_search(self, search_text):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(\n search_text)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER\n )\n\n def open_issue(self, issue_id):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(issue_id)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER\n )\n",
"<import token>\n\n\nclass DashboardPage(BasePage):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def at_page(self):\n return ('System Dashboard - Hillel IT School JIRA' in self.driver.title\n ) & self.is_element_visible(self.PROFILE_LOGO)\n <function token>\n\n def open_search_issues_page(self):\n self.is_element_visible(self.ISSUES_BUTTON)\n self.driver.find_element(*self.ISSUES_BUTTON).click()\n self.is_element_visible(self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION)\n self.driver.find_element(*self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION\n ).click()\n\n def open_dashboard_page(self):\n self.is_element_visible(self.DASHBOARD_BUTTON)\n self.driver.find_element(*self.DASHBOARD_BUTTON).click()\n self.is_element_visible(self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION)\n self.driver.find_element(*self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION\n ).click()\n\n def quick_search(self, search_text):\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(\n search_text)\n self.driver.find_element(*self.QUICK_SEARCH_FIELD).send_keys(Keys.ENTER\n )\n <function token>\n",
"<import token>\n\n\nclass DashboardPage(BasePage):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def at_page(self):\n return ('System Dashboard - Hillel IT School JIRA' in self.driver.title\n ) & self.is_element_visible(self.PROFILE_LOGO)\n <function token>\n\n def open_search_issues_page(self):\n self.is_element_visible(self.ISSUES_BUTTON)\n self.driver.find_element(*self.ISSUES_BUTTON).click()\n self.is_element_visible(self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION)\n self.driver.find_element(*self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION\n ).click()\n\n def open_dashboard_page(self):\n self.is_element_visible(self.DASHBOARD_BUTTON)\n self.driver.find_element(*self.DASHBOARD_BUTTON).click()\n self.is_element_visible(self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION)\n self.driver.find_element(*self.VIEW_SYSTEM_DASHBOARD_SUBMENU_OPTION\n ).click()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass DashboardPage(BasePage):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def at_page(self):\n return ('System Dashboard - Hillel IT School JIRA' in self.driver.title\n ) & self.is_element_visible(self.PROFILE_LOGO)\n <function token>\n\n def open_search_issues_page(self):\n self.is_element_visible(self.ISSUES_BUTTON)\n self.driver.find_element(*self.ISSUES_BUTTON).click()\n self.is_element_visible(self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION)\n self.driver.find_element(*self.SEARCH_FOR_ISSUES_ISSUES_SUBMENU_OPTION\n ).click()\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass DashboardPage(BasePage):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def at_page(self):\n return ('System Dashboard - Hillel IT School JIRA' in self.driver.title\n ) & self.is_element_visible(self.PROFILE_LOGO)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass DashboardPage(BasePage):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,540 |
322389e566466f0455eefe0d91fee05457fe987a
|
class Scheduler:
    def __init__(self, interval=15):
        self.interval = interval
        # Keep the state per instance; class-level mutables would be shared
        # by every Scheduler object.
        self.currently_loaded_ids = []
        self.minutes = {}
        for i in range(self.interval):
            self.minutes[i] = []
def diff(self, first, second):
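        # Items of first that are missing from second; using a set makes the
        # membership test O(1).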
second = set(second)
return [item for item in first if item not in second]
def load_ids(self, account_ids_list, minute):
''' Loads a list of new ids '''
self.currently_loaded_ids.extend(account_ids_list)
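        # Spread the new ids round-robin across the interval, starting at the
        # current minute.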
        for offset, account_id in enumerate(account_ids_list):
            minute_in_interval = (offset + minute) % self.interval
            self.minutes[minute_in_interval].append(account_id)
def unload_ids(self, account_ids_list):
        ''' Unload removed ids from the system '''
for i in self.minutes:
for j in account_ids_list:
if j in self.minutes[i]:
self.currently_loaded_ids.remove(j)
self.minutes[i].remove(j)
def get_account_ids_to_run(self, account_ids_list, minute):
''' Obtain the ids to run in a given minute '''
# Check for new ids in the given list
minute = minute % self.interval
new_ids = self.diff(account_ids_list, self.currently_loaded_ids)
        if new_ids:
self.load_ids(new_ids, minute)
# Check for removed ids in the given list
removed_ids = self.diff(self.currently_loaded_ids, account_ids_list)
        if removed_ids:
self.unload_ids(removed_ids)
account_ids_to_return = self.minutes[minute]
return account_ids_to_return
# A little test
if __name__ == "__main__":
dummy_ids = [
10001,
10002,
10003,
10004,
10005,
10006,
10007,
10008,
10009,
10010,
10011,
10012,
10013,
10014,
10015,
10016,
10017,
]
s = Scheduler()
for minute in range(120):
print ( "Minute: %d" % (minute) )
# Loads more ids
if minute == 25:
more_ids = [
9999,
7777,
8888,
]
dummy_ids.extend(more_ids)
# Unload some ids
if minute == 59:
dummy_ids = dummy_ids[:3]
        print(s.get_account_ids_to_run(dummy_ids, minute))
|
[
"class Scheduler:\n currently_loaded_ids = []\n minutes = {}\n\n def __init__(self, interval=15):\n self.interval = interval\n for i in range(self.interval):\n self.minutes[i] = []\n\n def diff(self, first, second):\n second = set(second)\n return [item for item in first if item not in second]\n\n def load_ids(self, account_ids_list, minute):\n ''' Loads a list of new ids '''\n self.currently_loaded_ids.extend(account_ids_list)\n account_ids_list_index = 0\n for i in account_ids_list:\n minute_in_interval = (account_ids_list_index + minute) % self.interval\n self.minutes[minute_in_interval].append(i)\n account_ids_list_index += 1\n\n def unload_ids(self, account_ids_list):\n ''' Unload removed ids from de system '''\n for i in self.minutes:\n for j in account_ids_list:\n if j in self.minutes[i]:\n self.currently_loaded_ids.remove(j)\n self.minutes[i].remove(j)\n\n def get_account_ids_to_run(self, account_ids_list, minute):\n ''' Obtain the ids to run in a given minute '''\n\n # Check for new ids in the given list\n minute = minute % self.interval\n new_ids = self.diff(account_ids_list, self.currently_loaded_ids)\n if( len(new_ids) > 0 ):\n self.load_ids(new_ids, minute)\n\n # Check for removed ids in the given list\n removed_ids = self.diff(self.currently_loaded_ids, account_ids_list)\n if( len(removed_ids) > 0):\n self.unload_ids(removed_ids)\n\n account_ids_to_return = self.minutes[minute]\n return account_ids_to_return\n\n# A little test\nif __name__ == \"__main__\":\n dummy_ids = [\n 10001,\n 10002,\n 10003,\n 10004,\n 10005,\n 10006,\n 10007,\n 10008,\n 10009,\n 10010,\n 10011,\n 10012,\n 10013,\n 10014,\n 10015,\n 10016,\n 10017,\n ]\n\n s = Scheduler()\n for minute in range(120):\n print ( \"Minute: %d\" % (minute) )\n\n # Loads more ids\n if minute == 25:\n more_ids = [\n 9999,\n 7777,\n 8888,\n ]\n dummy_ids.extend(more_ids)\n\n # Unload some ids\n if minute == 59:\n dummy_ids = dummy_ids[:3]\n\n print (s.get_account_ids_to_run(dummy_ids, minute ))\n",
"class Scheduler:\n currently_loaded_ids = []\n minutes = {}\n\n def __init__(self, interval=15):\n self.interval = interval\n for i in range(self.interval):\n self.minutes[i] = []\n\n def diff(self, first, second):\n second = set(second)\n return [item for item in first if item not in second]\n\n def load_ids(self, account_ids_list, minute):\n \"\"\" Loads a list of new ids \"\"\"\n self.currently_loaded_ids.extend(account_ids_list)\n account_ids_list_index = 0\n for i in account_ids_list:\n minute_in_interval = (account_ids_list_index + minute\n ) % self.interval\n self.minutes[minute_in_interval].append(i)\n account_ids_list_index += 1\n\n def unload_ids(self, account_ids_list):\n \"\"\" Unload removed ids from de system \"\"\"\n for i in self.minutes:\n for j in account_ids_list:\n if j in self.minutes[i]:\n self.currently_loaded_ids.remove(j)\n self.minutes[i].remove(j)\n\n def get_account_ids_to_run(self, account_ids_list, minute):\n \"\"\" Obtain the ids to run in a given minute \"\"\"\n minute = minute % self.interval\n new_ids = self.diff(account_ids_list, self.currently_loaded_ids)\n if len(new_ids) > 0:\n self.load_ids(new_ids, minute)\n removed_ids = self.diff(self.currently_loaded_ids, account_ids_list)\n if len(removed_ids) > 0:\n self.unload_ids(removed_ids)\n account_ids_to_return = self.minutes[minute]\n return account_ids_to_return\n\n\nif __name__ == '__main__':\n dummy_ids = [10001, 10002, 10003, 10004, 10005, 10006, 10007, 10008, \n 10009, 10010, 10011, 10012, 10013, 10014, 10015, 10016, 10017]\n s = Scheduler()\n for minute in range(120):\n print('Minute: %d' % minute)\n if minute == 25:\n more_ids = [9999, 7777, 8888]\n dummy_ids.extend(more_ids)\n if minute == 59:\n dummy_ids = dummy_ids[:3]\n print(s.get_account_ids_to_run(dummy_ids, minute))\n",
"class Scheduler:\n currently_loaded_ids = []\n minutes = {}\n\n def __init__(self, interval=15):\n self.interval = interval\n for i in range(self.interval):\n self.minutes[i] = []\n\n def diff(self, first, second):\n second = set(second)\n return [item for item in first if item not in second]\n\n def load_ids(self, account_ids_list, minute):\n \"\"\" Loads a list of new ids \"\"\"\n self.currently_loaded_ids.extend(account_ids_list)\n account_ids_list_index = 0\n for i in account_ids_list:\n minute_in_interval = (account_ids_list_index + minute\n ) % self.interval\n self.minutes[minute_in_interval].append(i)\n account_ids_list_index += 1\n\n def unload_ids(self, account_ids_list):\n \"\"\" Unload removed ids from de system \"\"\"\n for i in self.minutes:\n for j in account_ids_list:\n if j in self.minutes[i]:\n self.currently_loaded_ids.remove(j)\n self.minutes[i].remove(j)\n\n def get_account_ids_to_run(self, account_ids_list, minute):\n \"\"\" Obtain the ids to run in a given minute \"\"\"\n minute = minute % self.interval\n new_ids = self.diff(account_ids_list, self.currently_loaded_ids)\n if len(new_ids) > 0:\n self.load_ids(new_ids, minute)\n removed_ids = self.diff(self.currently_loaded_ids, account_ids_list)\n if len(removed_ids) > 0:\n self.unload_ids(removed_ids)\n account_ids_to_return = self.minutes[minute]\n return account_ids_to_return\n\n\n<code token>\n",
"class Scheduler:\n <assignment token>\n <assignment token>\n\n def __init__(self, interval=15):\n self.interval = interval\n for i in range(self.interval):\n self.minutes[i] = []\n\n def diff(self, first, second):\n second = set(second)\n return [item for item in first if item not in second]\n\n def load_ids(self, account_ids_list, minute):\n \"\"\" Loads a list of new ids \"\"\"\n self.currently_loaded_ids.extend(account_ids_list)\n account_ids_list_index = 0\n for i in account_ids_list:\n minute_in_interval = (account_ids_list_index + minute\n ) % self.interval\n self.minutes[minute_in_interval].append(i)\n account_ids_list_index += 1\n\n def unload_ids(self, account_ids_list):\n \"\"\" Unload removed ids from de system \"\"\"\n for i in self.minutes:\n for j in account_ids_list:\n if j in self.minutes[i]:\n self.currently_loaded_ids.remove(j)\n self.minutes[i].remove(j)\n\n def get_account_ids_to_run(self, account_ids_list, minute):\n \"\"\" Obtain the ids to run in a given minute \"\"\"\n minute = minute % self.interval\n new_ids = self.diff(account_ids_list, self.currently_loaded_ids)\n if len(new_ids) > 0:\n self.load_ids(new_ids, minute)\n removed_ids = self.diff(self.currently_loaded_ids, account_ids_list)\n if len(removed_ids) > 0:\n self.unload_ids(removed_ids)\n account_ids_to_return = self.minutes[minute]\n return account_ids_to_return\n\n\n<code token>\n",
"class Scheduler:\n <assignment token>\n <assignment token>\n\n def __init__(self, interval=15):\n self.interval = interval\n for i in range(self.interval):\n self.minutes[i] = []\n\n def diff(self, first, second):\n second = set(second)\n return [item for item in first if item not in second]\n\n def load_ids(self, account_ids_list, minute):\n \"\"\" Loads a list of new ids \"\"\"\n self.currently_loaded_ids.extend(account_ids_list)\n account_ids_list_index = 0\n for i in account_ids_list:\n minute_in_interval = (account_ids_list_index + minute\n ) % self.interval\n self.minutes[minute_in_interval].append(i)\n account_ids_list_index += 1\n <function token>\n\n def get_account_ids_to_run(self, account_ids_list, minute):\n \"\"\" Obtain the ids to run in a given minute \"\"\"\n minute = minute % self.interval\n new_ids = self.diff(account_ids_list, self.currently_loaded_ids)\n if len(new_ids) > 0:\n self.load_ids(new_ids, minute)\n removed_ids = self.diff(self.currently_loaded_ids, account_ids_list)\n if len(removed_ids) > 0:\n self.unload_ids(removed_ids)\n account_ids_to_return = self.minutes[minute]\n return account_ids_to_return\n\n\n<code token>\n",
"class Scheduler:\n <assignment token>\n <assignment token>\n\n def __init__(self, interval=15):\n self.interval = interval\n for i in range(self.interval):\n self.minutes[i] = []\n <function token>\n\n def load_ids(self, account_ids_list, minute):\n \"\"\" Loads a list of new ids \"\"\"\n self.currently_loaded_ids.extend(account_ids_list)\n account_ids_list_index = 0\n for i in account_ids_list:\n minute_in_interval = (account_ids_list_index + minute\n ) % self.interval\n self.minutes[minute_in_interval].append(i)\n account_ids_list_index += 1\n <function token>\n\n def get_account_ids_to_run(self, account_ids_list, minute):\n \"\"\" Obtain the ids to run in a given minute \"\"\"\n minute = minute % self.interval\n new_ids = self.diff(account_ids_list, self.currently_loaded_ids)\n if len(new_ids) > 0:\n self.load_ids(new_ids, minute)\n removed_ids = self.diff(self.currently_loaded_ids, account_ids_list)\n if len(removed_ids) > 0:\n self.unload_ids(removed_ids)\n account_ids_to_return = self.minutes[minute]\n return account_ids_to_return\n\n\n<code token>\n",
"class Scheduler:\n <assignment token>\n <assignment token>\n\n def __init__(self, interval=15):\n self.interval = interval\n for i in range(self.interval):\n self.minutes[i] = []\n <function token>\n <function token>\n <function token>\n\n def get_account_ids_to_run(self, account_ids_list, minute):\n \"\"\" Obtain the ids to run in a given minute \"\"\"\n minute = minute % self.interval\n new_ids = self.diff(account_ids_list, self.currently_loaded_ids)\n if len(new_ids) > 0:\n self.load_ids(new_ids, minute)\n removed_ids = self.diff(self.currently_loaded_ids, account_ids_list)\n if len(removed_ids) > 0:\n self.unload_ids(removed_ids)\n account_ids_to_return = self.minutes[minute]\n return account_ids_to_return\n\n\n<code token>\n",
"class Scheduler:\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_account_ids_to_run(self, account_ids_list, minute):\n \"\"\" Obtain the ids to run in a given minute \"\"\"\n minute = minute % self.interval\n new_ids = self.diff(account_ids_list, self.currently_loaded_ids)\n if len(new_ids) > 0:\n self.load_ids(new_ids, minute)\n removed_ids = self.diff(self.currently_loaded_ids, account_ids_list)\n if len(removed_ids) > 0:\n self.unload_ids(removed_ids)\n account_ids_to_return = self.minutes[minute]\n return account_ids_to_return\n\n\n<code token>\n",
"class Scheduler:\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<class token>\n<code token>\n"
] | false |
99,541 |
9d262d1bcda896119fdff13da27e0eef4561e50e
|
import inquirer
from inquirer.themes import GreenPassion
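# Three prompt styles: free-text input, a single-choice list, and a
# multi-select checkbox.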
q = [
inquirer.Text('name',
message='Whats your name?',
default='No one'),
inquirer.List('jon',
message='Does Jon Snow know?',
choices=['yes', 'no'],
default='no'),
inquirer.Checkbox('kill_list',
message='Who you want to kill?',
choices=['Cersei', 'Littlefinger', 'The Mountain']
)
]
answers = inquirer.prompt(q, theme=GreenPassion())
print(answers)  # the answers come back as a dict keyed by each question's name
|
[
"import inquirer\nfrom inquirer.themes import GreenPassion\n\nq = [\n inquirer.Text('name',\n message='Whats your name?',\n default='No one'),\n inquirer.List('jon',\n message='Does Jon Snow know?',\n choices=['yes', 'no'],\n default='no'),\n inquirer.Checkbox('kill_list',\n message='Who you want to kill?',\n choices=['Cersei', 'Littlefinger', 'The Mountain']\n )\n]\n\ninquirer.prompt(q, theme=GreenPassion())\n",
"import inquirer\nfrom inquirer.themes import GreenPassion\nq = [inquirer.Text('name', message='Whats your name?', default='No one'),\n inquirer.List('jon', message='Does Jon Snow know?', choices=['yes',\n 'no'], default='no'), inquirer.Checkbox('kill_list', message=\n 'Who you want to kill?', choices=['Cersei', 'Littlefinger',\n 'The Mountain'])]\ninquirer.prompt(q, theme=GreenPassion())\n",
"<import token>\nq = [inquirer.Text('name', message='Whats your name?', default='No one'),\n inquirer.List('jon', message='Does Jon Snow know?', choices=['yes',\n 'no'], default='no'), inquirer.Checkbox('kill_list', message=\n 'Who you want to kill?', choices=['Cersei', 'Littlefinger',\n 'The Mountain'])]\ninquirer.prompt(q, theme=GreenPassion())\n",
"<import token>\n<assignment token>\ninquirer.prompt(q, theme=GreenPassion())\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
99,542 |
84ca183127159308d65d3c37798d3a759d4c6f82
|
from multiprocessing import Queue, Process
import time
import random
list1 = ["java","Python","JavaScript"]
def write(queue):
for value in list1:
print(f"正在向队列中添加数据-->{value}")
queue.put_nowait(value)
time.sleep(random.random())
def read(queue):
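    # Polling empty() is safe here only because the writer has already
    # finished (joined) before the reader starts.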
while True:
if not queue.empty():
value = queue.get_nowait()
print(f"从队列中读取的数据为-->{value}")
time.sleep(random.random())
else:
break
if __name__ == '__main__':  # guard required when processes start via spawn (Windows/macOS)
    queue = Queue()
    write_data = Process(target=write, args=(queue,))
    read_data = Process(target=read, args=(queue,))
    write_data.start()
    write_data.join()
    read_data.start()
    read_data.join()
    print('ok')
|
[
"from multiprocessing import Queue,Process\nimport time,random\nlist1 = [\"java\",\"Python\",\"JavaScript\"]\n\ndef write(queue):\n for value in list1:\n print(f\"正在向队列中添加数据-->{value}\")\n queue.put_nowait(value)\n time.sleep(random.random())\n\ndef read(queue):\n while True:\n if not queue.empty():\n value = queue.get_nowait()\n print(f\"从队列中读取的数据为-->{value}\")\n time.sleep(random.random())\n else:\n break\n\nqueue = Queue()\nwrite_data = Process(target=write,args = (queue,))\nread_data = Process(target=read,args=(queue,))\n\nwrite_data.start()\nwrite_data.join()\nread_data.start()\nread_data.join()\nprint('ok')\n\n",
"from multiprocessing import Queue, Process\nimport time, random\nlist1 = ['java', 'Python', 'JavaScript']\n\n\ndef write(queue):\n for value in list1:\n print(f'正在向队列中添加数据-->{value}')\n queue.put_nowait(value)\n time.sleep(random.random())\n\n\ndef read(queue):\n while True:\n if not queue.empty():\n value = queue.get_nowait()\n print(f'从队列中读取的数据为-->{value}')\n time.sleep(random.random())\n else:\n break\n\n\nqueue = Queue()\nwrite_data = Process(target=write, args=(queue,))\nread_data = Process(target=read, args=(queue,))\nwrite_data.start()\nwrite_data.join()\nread_data.start()\nread_data.join()\nprint('ok')\n",
"<import token>\nlist1 = ['java', 'Python', 'JavaScript']\n\n\ndef write(queue):\n for value in list1:\n print(f'正在向队列中添加数据-->{value}')\n queue.put_nowait(value)\n time.sleep(random.random())\n\n\ndef read(queue):\n while True:\n if not queue.empty():\n value = queue.get_nowait()\n print(f'从队列中读取的数据为-->{value}')\n time.sleep(random.random())\n else:\n break\n\n\nqueue = Queue()\nwrite_data = Process(target=write, args=(queue,))\nread_data = Process(target=read, args=(queue,))\nwrite_data.start()\nwrite_data.join()\nread_data.start()\nread_data.join()\nprint('ok')\n",
"<import token>\n<assignment token>\n\n\ndef write(queue):\n for value in list1:\n print(f'正在向队列中添加数据-->{value}')\n queue.put_nowait(value)\n time.sleep(random.random())\n\n\ndef read(queue):\n while True:\n if not queue.empty():\n value = queue.get_nowait()\n print(f'从队列中读取的数据为-->{value}')\n time.sleep(random.random())\n else:\n break\n\n\n<assignment token>\nwrite_data.start()\nwrite_data.join()\nread_data.start()\nread_data.join()\nprint('ok')\n",
"<import token>\n<assignment token>\n\n\ndef write(queue):\n for value in list1:\n print(f'正在向队列中添加数据-->{value}')\n queue.put_nowait(value)\n time.sleep(random.random())\n\n\ndef read(queue):\n while True:\n if not queue.empty():\n value = queue.get_nowait()\n print(f'从队列中读取的数据为-->{value}')\n time.sleep(random.random())\n else:\n break\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef write(queue):\n for value in list1:\n print(f'正在向队列中添加数据-->{value}')\n queue.put_nowait(value)\n time.sleep(random.random())\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
99,543 |
c7a72bb88345558f06bce2b51a037ce3bc85cfb6
|
#!/usr/bin/env python3
# Enter your code here. Read input from STDIN. Print output to STDOUT
def string_manipulate(string):
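    # Collect the characters at even and odd indices separately, then join the
    # two halves with a single space.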
    even_string = ''
    odd_string = ''
    for idx, val in enumerate(string):
        if idx % 2 == 0:
            even_string += val
        else:
            odd_string += val
    return even_string + " " + odd_string
if __name__ == '__main__':
T = int(input().strip())
for t in range(T):
        string = input().strip()
print(string_manipulate(string))
|
[
"#!/usr/bin/env python3\n\n# Enter your code here. Read input from STDIN. Print output to STDOUT\n\n\ndef string_manipulate(string):\n even_string=''\n odd_string=''\n for idx, val in enumerate(string):\n if idx % 2 == 0:\n even_string+=val\n else:\n odd_string+=val\n \n return even_string+\" \"+odd_string\n \n \nif __name__ == '__main__':\n T = int(input().strip())\n for t in range(T):\n string = str(input().strip()) \n print(string_manipulate(string))\n \n\n",
"def string_manipulate(string):\n even_string = ''\n odd_string = ''\n for idx, val in enumerate(string):\n if idx % 2 == 0:\n even_string += val\n else:\n odd_string += val\n return even_string + ' ' + odd_string\n\n\nif __name__ == '__main__':\n T = int(input().strip())\n for t in range(T):\n string = str(input().strip())\n print(string_manipulate(string))\n",
"def string_manipulate(string):\n even_string = ''\n odd_string = ''\n for idx, val in enumerate(string):\n if idx % 2 == 0:\n even_string += val\n else:\n odd_string += val\n return even_string + ' ' + odd_string\n\n\n<code token>\n",
"<function token>\n<code token>\n"
] | false |
99,544 |
7b45d0f2de38afed5c1ab2ef58644ec9494eeede
|
from django.db import models
class Assessment(models.Model):
"""
Assessment Model
"""
name = models.CharField(max_length=400)
description = models.TextField(null=True, blank=True)
user = models.ForeignKey('accounts.UserProfile', null=True, blank=True,
on_delete=models.SET_NULL)
def __str__(self):
return self.name
def get_name(self):
return self.name
class Question(models.Model):
"""
Question Model
"""
assessment = models.ForeignKey('Assessment', null=True, blank=True,
on_delete=models.SET_NULL)
question_number = models.CharField(max_length=2, null=True, blank=True)
question = models.TextField()
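    # A question may carry an uploaded image, a link to an external one, or neither.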
image = models.ImageField(null=True, blank=True)
image_url = models.URLField(max_length=1024, null=True, blank=True)
def __str__(self):
return self.question
|
[
"from django.db import models\n\n\nclass Assessment(models.Model):\n \"\"\"\n Assessment Model\n \"\"\"\n name = models.CharField(max_length=400)\n description = models.TextField(null=True, blank=True)\n user = models.ForeignKey('accounts.UserProfile', null=True, blank=True,\n on_delete=models.SET_NULL)\n\n def __str__(self):\n return self.name\n\n def get_name(self):\n return self.name\n\n\nclass Question(models.Model):\n \"\"\"\n Question Model\n \"\"\"\n assessment = models.ForeignKey('Assessment', null=True, blank=True,\n on_delete=models.SET_NULL)\n question_number = models.CharField(max_length=2, null=True, blank=True)\n question = models.TextField()\n image = models.ImageField(null=True, blank=True)\n image_url = models.URLField(max_length=1024, null=True, blank=True)\n\n def __str__(self):\n return self.question\n",
"from django.db import models\n\n\nclass Assessment(models.Model):\n \"\"\"\n Assessment Model\n \"\"\"\n name = models.CharField(max_length=400)\n description = models.TextField(null=True, blank=True)\n user = models.ForeignKey('accounts.UserProfile', null=True, blank=True,\n on_delete=models.SET_NULL)\n\n def __str__(self):\n return self.name\n\n def get_name(self):\n return self.name\n\n\nclass Question(models.Model):\n \"\"\"\n Question Model\n \"\"\"\n assessment = models.ForeignKey('Assessment', null=True, blank=True,\n on_delete=models.SET_NULL)\n question_number = models.CharField(max_length=2, null=True, blank=True)\n question = models.TextField()\n image = models.ImageField(null=True, blank=True)\n image_url = models.URLField(max_length=1024, null=True, blank=True)\n\n def __str__(self):\n return self.question\n",
"<import token>\n\n\nclass Assessment(models.Model):\n \"\"\"\n Assessment Model\n \"\"\"\n name = models.CharField(max_length=400)\n description = models.TextField(null=True, blank=True)\n user = models.ForeignKey('accounts.UserProfile', null=True, blank=True,\n on_delete=models.SET_NULL)\n\n def __str__(self):\n return self.name\n\n def get_name(self):\n return self.name\n\n\nclass Question(models.Model):\n \"\"\"\n Question Model\n \"\"\"\n assessment = models.ForeignKey('Assessment', null=True, blank=True,\n on_delete=models.SET_NULL)\n question_number = models.CharField(max_length=2, null=True, blank=True)\n question = models.TextField()\n image = models.ImageField(null=True, blank=True)\n image_url = models.URLField(max_length=1024, null=True, blank=True)\n\n def __str__(self):\n return self.question\n",
"<import token>\n\n\nclass Assessment(models.Model):\n <docstring token>\n name = models.CharField(max_length=400)\n description = models.TextField(null=True, blank=True)\n user = models.ForeignKey('accounts.UserProfile', null=True, blank=True,\n on_delete=models.SET_NULL)\n\n def __str__(self):\n return self.name\n\n def get_name(self):\n return self.name\n\n\nclass Question(models.Model):\n \"\"\"\n Question Model\n \"\"\"\n assessment = models.ForeignKey('Assessment', null=True, blank=True,\n on_delete=models.SET_NULL)\n question_number = models.CharField(max_length=2, null=True, blank=True)\n question = models.TextField()\n image = models.ImageField(null=True, blank=True)\n image_url = models.URLField(max_length=1024, null=True, blank=True)\n\n def __str__(self):\n return self.question\n",
"<import token>\n\n\nclass Assessment(models.Model):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.name\n\n def get_name(self):\n return self.name\n\n\nclass Question(models.Model):\n \"\"\"\n Question Model\n \"\"\"\n assessment = models.ForeignKey('Assessment', null=True, blank=True,\n on_delete=models.SET_NULL)\n question_number = models.CharField(max_length=2, null=True, blank=True)\n question = models.TextField()\n image = models.ImageField(null=True, blank=True)\n image_url = models.URLField(max_length=1024, null=True, blank=True)\n\n def __str__(self):\n return self.question\n",
"<import token>\n\n\nclass Assessment(models.Model):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def get_name(self):\n return self.name\n\n\nclass Question(models.Model):\n \"\"\"\n Question Model\n \"\"\"\n assessment = models.ForeignKey('Assessment', null=True, blank=True,\n on_delete=models.SET_NULL)\n question_number = models.CharField(max_length=2, null=True, blank=True)\n question = models.TextField()\n image = models.ImageField(null=True, blank=True)\n image_url = models.URLField(max_length=1024, null=True, blank=True)\n\n def __str__(self):\n return self.question\n",
"<import token>\n\n\nclass Assessment(models.Model):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass Question(models.Model):\n \"\"\"\n Question Model\n \"\"\"\n assessment = models.ForeignKey('Assessment', null=True, blank=True,\n on_delete=models.SET_NULL)\n question_number = models.CharField(max_length=2, null=True, blank=True)\n question = models.TextField()\n image = models.ImageField(null=True, blank=True)\n image_url = models.URLField(max_length=1024, null=True, blank=True)\n\n def __str__(self):\n return self.question\n",
"<import token>\n<class token>\n\n\nclass Question(models.Model):\n \"\"\"\n Question Model\n \"\"\"\n assessment = models.ForeignKey('Assessment', null=True, blank=True,\n on_delete=models.SET_NULL)\n question_number = models.CharField(max_length=2, null=True, blank=True)\n question = models.TextField()\n image = models.ImageField(null=True, blank=True)\n image_url = models.URLField(max_length=1024, null=True, blank=True)\n\n def __str__(self):\n return self.question\n",
"<import token>\n<class token>\n\n\nclass Question(models.Model):\n <docstring token>\n assessment = models.ForeignKey('Assessment', null=True, blank=True,\n on_delete=models.SET_NULL)\n question_number = models.CharField(max_length=2, null=True, blank=True)\n question = models.TextField()\n image = models.ImageField(null=True, blank=True)\n image_url = models.URLField(max_length=1024, null=True, blank=True)\n\n def __str__(self):\n return self.question\n",
"<import token>\n<class token>\n\n\nclass Question(models.Model):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.question\n",
"<import token>\n<class token>\n\n\nclass Question(models.Model):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
99,545 |
4f2f962919e36962ec2bbb7d16f8569d201a4188
|
# coding=utf-8
import xml.dom.minidom
import inspect
from xml.dom.minidom import parse
file = open("test.xast")
doc = parse(file)  # named doc rather than xml, which would shadow the module
file.close()
print(doc.toxml())
print(dir(doc))
# print inspect
# print dir(inspect)
|
[
"# coding=utf-8\nimport xml\nimport xml.dom.minidom\nimport inspect\nfrom xml.dom.minidom import parse\nfile=open(\"test.xast\")\nxml=parse(file)\nprint(xml.toxml())\nprint(dir(xml))\n# print inspect\n# print dir(inspect)",
"import xml\nimport xml.dom.minidom\nimport inspect\nfrom xml.dom.minidom import parse\nfile = open('test.xast')\nxml = parse(file)\nprint(xml.toxml())\nprint(dir(xml))\n",
"<import token>\nfile = open('test.xast')\nxml = parse(file)\nprint(xml.toxml())\nprint(dir(xml))\n",
"<import token>\n<assignment token>\nprint(xml.toxml())\nprint(dir(xml))\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
99,546 |
f039d4068dd6896c41043f2983b3966bcf903e83
|
class Galaxia:
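    # Class-level counter used to hand out a unique id to each galaxy.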
    _g_id = 100000

    def __init__(self):
        self._id = Galaxia._g_id
        self.planetas = []
        Galaxia._g_id += 1
def modificar_galaxia(self, planeta):
if planeta not in self.planetas:
print("Ese planeta no se encuentra en esta galaxia")
else:
            print("0 : Tasa de minerales")
            print("1 : Tasa de deuterio")
            print("2 : Cantidad de soldados")
            print("3 : Cantidad de magos")
            print("4 : Crear planeta")
            print("5 : Eliminar planeta")
            i = int(input("¿Que atributo deseas modificar? Introduzca el numero asociado: "))
if i == 0:
n = int(input("Introduzca el nuevo valor para la tasa de minerales: "))
while n < 1 or n > 10:
print("Ese valor no es valido.")
n = int(input("Introduzca el nuevo valor para la tasa de minerales: "))
|
[
"class Galaxia:\r\n _g_id=100000\r\n def __init__(self):\r\n self._id = Galaxia._g_id\r\n self.planetas =[]\r\n Galaxia._g_id += 1\r\n\r\n def modificar_galaxia(self, planeta):\r\n if planeta not in self.planetas:\r\n print(\"Ese planeta no se encuentra en esta galaxia\")\r\n else:\r\n i = int(input(\"¿Que atributo deseas modificar? Introduzca el numero asociado: \"))\r\n print(\"0 : Tasa de minerales\")\r\n print(\"1 : Tasa de deuterio\")\r\n print(\"2 : Cantidad de soldados\")\r\n print(\"3 : Cantidad de magos\")\r\n print(\"4 : Crear planeta\")\r\n print(\"5 : Eliminar planeta\")\r\n\r\n if i == 0:\r\n n = int(input(\"Introduzca el nuevo valor para la tasa de minerales: \"))\r\n while n < 1 or n > 10:\r\n print(\"Ese valor no es valido.\")\r\n n = int(input(\"Introduzca el nuevo valor para la tasa de minerales: \"))\r\n\r\n\r\n\r\n\r\n",
"class Galaxia:\n _g_id = 100000\n\n def __init__(self):\n self._id = Galaxia._g_id\n self.planetas = []\n Galaxia._g_id += 1\n\n def modificar_galaxia(self, planeta):\n if planeta not in self.planetas:\n print('Ese planeta no se encuentra en esta galaxia')\n else:\n i = int(input(\n '¿Que atributo deseas modificar? Introduzca el numero asociado: '\n ))\n print('0 : Tasa de minerales')\n print('1 : Tasa de deuterio')\n print('2 : Cantidad de soldados')\n print('3 : Cantidad de magos')\n print('4 : Crear planeta')\n print('5 : Eliminar planeta')\n if i == 0:\n n = int(input(\n 'Introduzca el nuevo valor para la tasa de minerales: '))\n while n < 1 or n > 10:\n print('Ese valor no es valido.')\n n = int(input(\n 'Introduzca el nuevo valor para la tasa de minerales: '\n ))\n",
"class Galaxia:\n <assignment token>\n\n def __init__(self):\n self._id = Galaxia._g_id\n self.planetas = []\n Galaxia._g_id += 1\n\n def modificar_galaxia(self, planeta):\n if planeta not in self.planetas:\n print('Ese planeta no se encuentra en esta galaxia')\n else:\n i = int(input(\n '¿Que atributo deseas modificar? Introduzca el numero asociado: '\n ))\n print('0 : Tasa de minerales')\n print('1 : Tasa de deuterio')\n print('2 : Cantidad de soldados')\n print('3 : Cantidad de magos')\n print('4 : Crear planeta')\n print('5 : Eliminar planeta')\n if i == 0:\n n = int(input(\n 'Introduzca el nuevo valor para la tasa de minerales: '))\n while n < 1 or n > 10:\n print('Ese valor no es valido.')\n n = int(input(\n 'Introduzca el nuevo valor para la tasa de minerales: '\n ))\n",
"class Galaxia:\n <assignment token>\n\n def __init__(self):\n self._id = Galaxia._g_id\n self.planetas = []\n Galaxia._g_id += 1\n <function token>\n",
"class Galaxia:\n <assignment token>\n <function token>\n <function token>\n",
"<class token>\n"
] | false |
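The re-prompt loop in modificar_galaxia (ask, reject values outside 1..10, ask again) is a pattern worth factoring out. A minimal sketch, with a hypothetical helper name and illustrative bounds:

def pedir_entero_en_rango(mensaje, minimo=1, maximo=10):
    # keep asking until the value falls inside [minimo, maximo]
    n = int(input(mensaje))
    while n < minimo or n > maximo:
        print("Ese valor no es valido.")
        n = int(input(mensaje))
    return n

# usage, mirroring the i == 0 branch above:
# tasa = pedir_entero_en_rango("Introduzca el nuevo valor para la tasa de minerales: ")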
99,547 |
e716f04f8f53972f78dc6bc8cec55a3f97cee441
|
# Copyright (c) Dec 22, 2014 CareerMonk Publications and others.
# E-Mail : [email protected]
# Creation Date : 2014-01-10 06:15:46
# Last modification : 2008-10-31
# by : Narasimha Karumanchi
# Book Title : Data Structures And Algorithms Made In Java
# Warranty : This software is provided "as is" without any
# warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose.
class Node:
    def __init__(self, data):
        self.set_data(data)
        self.set_next(None)
        self.set_rand(None)
    # method for setting the data field of the node
    def set_data(self, data):
        self.data = data
    # method for getting the data field of the node
    def get_data(self):
        return self.data
    # method for setting the next field of the node
    def set_next(self, nextV):
        self.next = nextV
    # method for setting the rand field of the node
    def set_rand(self, rand):
        self.rand = rand
    # method for getting the rand field of the node
    def get_rand(self):
        return self.rand
    # method for getting the next field of the node
    def get_next(self):
        return self.next
    # returns true if the node points to another node
    def has_next(self):
        return self.next is not None
# module-level helper: clone a singly linked list whose nodes also carry
# a random pointer; takes the head node as its argument
def clone_linked_list(old):
    if not old:
        return None
    old_copy = old
    root = Node(old.get_data())
    prev = root
    # map every original node to its copy; seed it with the head so rand
    # pointers aimed at the head can be resolved in the second pass
    mapping = {old: root}
    old = old.get_next()
    while old:
        temp = Node(old.get_data())
        mapping[old] = temp
        prev.set_next(temp)
        prev = temp
        old = old.get_next()
    # second pass: wire up the rand pointers on the copies
    old = old_copy
    temp = root
    while old:
        temp.set_rand(mapping.get(old.get_rand()))
        temp = temp.get_next()
        old = old.get_next()
    return root
|
[
"# Copyright (c) Dec 22, 2014 CareerMonk Publications and others.\n# E-Mail \t\t: [email protected] \n# Creation Date \t\t: 2014-01-10 06:15:46 \n# Last modification\t\t: 2008-10-31 \n# by\t\t: Narasimha Karumanchi \n# Book Title\t\t\t: Data Structures And Algorithms Made In Java\n# Warranty \t\t: This software is provided \"as is\" without any \n# \t\t\t\t warranty; without even the implied warranty of \n# \t\t\t\t merchantability or fitness for a particular purpose. \n\nclass Node:\n\tdef __init__(self, data):\n\t\tself.set_data(data)\n\t\tself.set_next(None)\n\t\tself.set_rand(None)\n\t# method for setting the data field of the node \n\tdef set_data(self, data):\n\t\tself.data = data\n\t# method for getting the data field of the node \n\tdef get_data(self):\n\t\treturn self.data\n\t# method for setting the next field of the node\n\tdef set_next(self, nextV):\n\t\tself.next = nextV\n\t# method for setting the next field of the node\n\tdef set_rand(self, rand):\n\t\tself.rand = rand\t\n\t# method for getting the next field of the node\n\tdef get_rand(self):\n\t\treturn self.rand\t\n\t# method for getting the next field of the node \n\tdef get_next(self):\n\t\treturn self.next\n\t# returns true if the node points to another node\n\tdef has_next(self):\n\t return self.next != None\n\t \n\tdef clone_linked_list(old):\n\t if not old:\n\t\treturn\n\n\t old_copy = old\n\t root = Node(old.get_data())\n\t prev = root\n\t temp = None\n\n\t old = old.get_next()\n\n\t mapping = {}\n\t \n\t while old:\n\t\ttemp = Node(old.get_data())\n\t\tmapping[old] = temp\n\t\t\n\t\tprev.set_next(temp)\n\t\tprev = temp\n\t\told = old.get_next()\n\n\t old = old_copy\n\t temp = root\n\n\t while old:\n\t\ttemp.set_rand(mapping[old.rand])\n\t\ttemp = temp.get_next()\n\t\told = old.get_next()\n\n\t return root\n"
] | true |
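A quick sanity check for the clone routine above (a sketch assuming the module-level clone_linked_list as in the listing): build a three-node list with rand pointers and verify the copy shares no nodes with the original.

a, b, c = Node(1), Node(2), Node(3)
a.set_next(b); b.set_next(c)
a.set_rand(c)   # head's rand points at the tail
b.set_rand(a)   # middle's rand points back at the head

copy = clone_linked_list(a)
assert copy is not a and copy.get_data() == 1
assert copy.get_rand() is copy.get_next().get_next()   # mirrors a.rand -> c
assert copy.get_next().get_rand() is copy              # mirrors b.rand -> a
assert copy.get_next().get_next().get_rand() is None   # c.rand was never set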
99,548 |
eb2401d35e08cea16ee76441d6a77214a8edcfa7
|
from PyQt4 import QtCore
import numpy as np
from collections import namedtuple
Position = namedtuple("Position", "direction scannumber polarization freqindex")
class TimeScanner(QtCore.QObject):
scanPositionChanged = QtCore.pyqtSignal(int)
changeTemperature = QtCore.pyqtSignal(int)
nextIndexChanged = QtCore.pyqtSignal(tuple)
topReached = QtCore.pyqtSignal(int)
bottomReached = QtCore.pyqtSignal(int)
boundaryReached = QtCore.pyqtSignal(tuple)
measured = QtCore.pyqtSignal(int)
dtChanged = QtCore.pyqtSignal(float)
bottomChanged = QtCore.pyqtSignal(int)
UP, DOWN = 0, 1
def __init__(self, n=100, parent=None):
QtCore.QObject.__init__(self, parent)
self.direction = TimeScanner.UP
self.top = 65535
self.bot = 0
self.ndot = n
self.nscans = 3
#self.period = 60 # is not used for now
self.debugprint = False
self.updaterange()
def setTop(self, top):
self.top = top
self.updaterange()
def setBottom(self, bot):
self.bot = bot
self.updaterange()
def setNdot(self, n):
self.ndot = n
self.updaterange()
def updaterange(self):
self.range = np.array(np.linspace(self.bot, self.top, self.ndot), dtype=int)
self.temperatureList = np.append(self.range, self.range[::-1])
self.dtChanged.emit(float(self.range[1]-self.range[0]))
self.bottomChanged.emit(self.range[0])
# for each scan we have 2 directions,
        # for each direction we have 2 polarization states
self.statesNumber = self.nscans * self.ndot * 2 * 2
self.reset()
def measure(self):
targetT = self.temperatureList[self.pos.freqindex]
if self.pos.direction == TimeScanner.UP:
self.measured.emit(targetT)
else:
self.measured.emit(2 * self.top - targetT)
def stateToPos(self, state):
rest, polarization = divmod(state, 2)
rest, tempindex = divmod(rest, self.ndot)
rest, direction = divmod(rest, 2)
rest, scannumber = divmod(rest, self.nscans)
assert rest == 0, "Index calculation failed"
if direction == TimeScanner.DOWN:
tempindex = self.ndot - tempindex - 1
pos = Position(direction, scannumber, polarization, tempindex)
return pos
def inc(self):
if self.state is None:
self.state = 0
self.pos = self.stateToPos(self.state)
return
self.state = self.state + 1
if self.state == self.statesNumber:
self.state = self.ndot * 2 * 2
self.pos = self.stateToPos(self.state)
def scan(self):
self.inc()
pos = self.pos
if self.debugprint:
self.debugprint = False
print "debug print! pos:", self.pos
self.nextIndexChanged.emit(pos)
targetT = self.temperatureList[pos.freqindex]
self.changeTemperature.emit(targetT)
if pos.direction == TimeScanner.UP:
self.scanPositionChanged.emit(targetT)
else:
self.scanPositionChanged.emit(2 * self.top - targetT)
if pos.freqindex == 0 and \
pos.direction == TimeScanner.DOWN and \
pos.polarization == 1:
print "bottom reached! pos:", self.pos
self.debugprint = True
self.bottomReached.emit(pos.scannumber)
self.boundaryReached.emit((pos.direction, pos.scannumber))
if pos.freqindex == self.ndot - 1 and \
pos.polarization == 1 and \
pos.direction == TimeScanner.UP :
print "top reached! pos:", self.pos
self.debugprint = True
self.topReached.emit(pos.scannumber)
self.boundaryReached.emit((pos.direction, pos.scannumber))
def reset(self):
self.state = None
self.pos = None
|
[
"from PyQt4 import QtCore\nimport numpy as np\nfrom collections import namedtuple\n\nPosition = namedtuple(\"Position\", \"direction scannumber polarization freqindex\")\n\nclass TimeScanner(QtCore.QObject):\n scanPositionChanged = QtCore.pyqtSignal(int)\n changeTemperature = QtCore.pyqtSignal(int)\n nextIndexChanged = QtCore.pyqtSignal(tuple)\n topReached = QtCore.pyqtSignal(int)\n bottomReached = QtCore.pyqtSignal(int)\n boundaryReached = QtCore.pyqtSignal(tuple)\n measured = QtCore.pyqtSignal(int)\n dtChanged = QtCore.pyqtSignal(float)\n bottomChanged = QtCore.pyqtSignal(int)\n UP, DOWN = 0, 1\n def __init__(self, n=100, parent=None):\n QtCore.QObject.__init__(self, parent)\n self.direction = TimeScanner.UP\n self.top = 65535\n self.bot = 0\n self.ndot = n\n self.nscans = 3\n #self.period = 60 # is not used for now\n self.debugprint = False\n self.updaterange()\n \n def setTop(self, top):\n self.top = top\n self.updaterange()\n \n def setBottom(self, bot):\n self.bot = bot\n self.updaterange()\n \n def setNdot(self, n):\n self.ndot = n\n self.updaterange()\n \n def updaterange(self):\n self.range = np.array(np.linspace(self.bot, self.top, self.ndot), dtype=int)\n self.temperatureList = np.append(self.range, self.range[::-1])\n self.dtChanged.emit(float(self.range[1]-self.range[0]))\n self.bottomChanged.emit(self.range[0])\n # for each scan we have 2 directions,\n # for each dirrection we have 2 polarization states\n self.statesNumber = self.nscans * self.ndot * 2 * 2 \n self.reset()\n \n def measure(self):\n targetT = self.temperatureList[self.pos.freqindex]\n if self.pos.direction == TimeScanner.UP:\n self.measured.emit(targetT)\n else:\n self.measured.emit(2 * self.top - targetT)\n \n def stateToPos(self, state):\n rest, polarization = divmod(state, 2)\n rest, tempindex = divmod(rest, self.ndot)\n rest, direction = divmod(rest, 2)\n rest, scannumber = divmod(rest, self.nscans)\n assert rest == 0, \"Index calculation failed\"\n if direction == TimeScanner.DOWN:\n tempindex = self.ndot - tempindex - 1 \n pos = Position(direction, scannumber, polarization, tempindex)\n return pos\n \n def inc(self):\n if self.state is None:\n self.state = 0\n self.pos = self.stateToPos(self.state)\n return\n \n self.state = self.state + 1 \n if self.state == self.statesNumber:\n self.state = self.ndot * 2 * 2 \n self.pos = self.stateToPos(self.state)\n \n def scan(self):\n self.inc()\n pos = self.pos\n if self.debugprint:\n self.debugprint = False\n print \"debug print! pos:\", self.pos\n \n self.nextIndexChanged.emit(pos)\n targetT = self.temperatureList[pos.freqindex]\n self.changeTemperature.emit(targetT)\n if pos.direction == TimeScanner.UP:\n self.scanPositionChanged.emit(targetT)\n else:\n self.scanPositionChanged.emit(2 * self.top - targetT)\n if pos.freqindex == 0 and \\\n pos.direction == TimeScanner.DOWN and \\\n pos.polarization == 1:\n print \"bottom reached! pos:\", self.pos\n self.debugprint = True\n self.bottomReached.emit(pos.scannumber)\n self.boundaryReached.emit((pos.direction, pos.scannumber))\n\n if pos.freqindex == self.ndot - 1 and \\\n pos.polarization == 1 and \\\n pos.direction == TimeScanner.UP :\n print \"top reached! pos:\", self.pos\n self.debugprint = True\n self.topReached.emit(pos.scannumber)\n self.boundaryReached.emit((pos.direction, pos.scannumber))\n \n \n def reset(self):\n self.state = None\n self.pos = None\n"
] | true |
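stateToPos above decodes a flat state counter into (direction, scan number, polarization, temperature index) by peeling digits off with divmod, i.e. a mixed-radix decode with polarization as the fastest-changing digit. A dependency-free sketch of the same idea (the ndot and nscans values are illustrative):

def state_to_pos(state, ndot=4, nscans=2):
    rest, polarization = divmod(state, 2)   # fastest-changing digit
    rest, tempindex = divmod(rest, ndot)
    rest, direction = divmod(rest, 2)
    rest, scannumber = divmod(rest, nscans)
    assert rest == 0, "state out of range"
    if direction == 1:                      # DOWN: walk temperatures backwards
        tempindex = ndot - tempindex - 1
    return direction, scannumber, polarization, tempindex

# the full cycle enumerates nscans * 2 * ndot * 2 states
for s in range(2 * 2 * 4 * 2):
    print(s, state_to_pos(s))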
99,549 |
6a969bafe3f196c2a57fff1feaf7081ac7e35bf7
|
import argparse
from tqdm import tqdm
def win(inputfile):
for en in inputfile['endpoints']:
for con in en['connections']:
con['lat'] = en['data_lat'] - con['lat']
return inputfile
def read_file(path):
with open(path, 'r') as reader:
count_videos, count_endpoints, count_requests, count_caches, storage_cache = [int(i) for i in reader.readline().split(" ")]
video_sizes = [int(i) for i in reader.readline().split(" ")]
endpoints = []
for eid in tqdm(range(count_endpoints), desc="Reading endpoints"):
            data_lat, connected_caches = [int(i) for i in reader.readline().split(" ")]
connections = []
for cnumb in range(connected_caches):
                number_cache, lat_cache = [int(i) for i in reader.readline().split(" ")]
connections.append({'id':number_cache, 'lat': lat_cache})
endpoints.append({'id': eid, 'data_lat': data_lat, 'connections': connections, 'requests':[]})
for rid in tqdm(range(count_requests), desc="Reading requests"):
number_video, eid, requests = [int(i) for i in reader.readline().split(" ")]
endpoints[eid]['requests'].append({'number_video': number_video, 'count_requests': requests})
return {
'count_videos': count_videos,
'count_endpoints': count_endpoints,
'count_requests': count_requests,
'count_caches': count_caches,
'storage_cache': storage_cache,
'video_sizes': video_sizes,
'endpoints': endpoints
}
parser = argparse.ArgumentParser()
parser.add_argument("input", help="input file")
parser.add_argument("output", help="output file")
args = parser.parse_args()
inputfile = read_file(args.input)
inputfile = win(inputfile)
with open(args.output, 'r') as reader:
count_caches = int(reader.readline())
caches = []
for i in tqdm(range(count_caches), desc='read output'):
line = reader.readline().split(" ")
id = int(line[0])
videos = []
line.remove(line[0])
for v in line:
videos.append(int(v))
caches.append({'id': id, 'videos': videos})
score = 0
print(inputfile)
for end in tqdm(inputfile['endpoints'], desc='counting scores'):
for req in end['requests']:
variants = []
variants.append(0)
for c in caches:
for v in c['videos']:
if v == req['number_video']:
for con in end['connections']:
if con['id'] == c['id']:
variants.append(con['lat']*req['count_requests'])
            variants.sort(reverse=True)
            score += variants[0]
print("Scores: ", score)
|
[
"import argparse\r\nfrom tqdm import tqdm\r\n\r\ndef win(inputfile):\r\n for en in inputfile['endpoints']:\r\n for con in en['connections']:\r\n con['lat'] = en['data_lat'] - con['lat']\r\n return inputfile\r\n\r\ndef read_file(path):\r\n with open(path, 'r') as reader:\r\n\r\n\r\n count_videos, count_endpoints, count_requests, count_caches, storage_cache = [int(i) for i in reader.readline().split(\" \")]\r\n\r\n video_sizes = [int(i) for i in reader.readline().split(\" \")]\r\n endpoints = []\r\n for eid in tqdm(range(count_endpoints), desc=\"Reading endpoints\"):\r\n data_lat, connected_caches = [int (i) for i in reader.readline().split(\" \")]\r\n connections = []\r\n\r\n for cnumb in range(connected_caches):\r\n number_cache, lat_cache = [int (i) for i in reader.readline().split(\" \")]\r\n connections.append({'id':number_cache, 'lat': lat_cache})\r\n\r\n endpoints.append({'id': eid, 'data_lat': data_lat, 'connections': connections, 'requests':[]})\r\n\r\n\r\n for rid in tqdm(range(count_requests), desc=\"Reading requests\"):\r\n number_video, eid, requests = [int(i) for i in reader.readline().split(\" \")]\r\n endpoints[eid]['requests'].append({'number_video': number_video, 'count_requests': requests})\r\n\r\n return {\r\n 'count_videos': count_videos,\r\n 'count_endpoints': count_endpoints,\r\n 'count_requests': count_requests,\r\n 'count_caches': count_caches,\r\n 'storage_cache': storage_cache,\r\n 'video_sizes': video_sizes,\r\n 'endpoints': endpoints\r\n }\r\n\r\nparser = argparse.ArgumentParser()\r\n\r\nparser.add_argument(\"input\", help=\"input file\")\r\nparser.add_argument(\"output\", help=\"output file\")\r\nargs = parser.parse_args()\r\n\r\ninputfile = read_file(args.input)\r\ninputfile= win(inputfile)\r\nwith open(args.output, 'r') as reader:\r\n count_caches = int(reader.readline())\r\n caches = []\r\n for i in tqdm(range(count_caches), desc='read output'):\r\n line = reader.readline().split(\" \")\r\n id = int(line[0])\r\n videos = []\r\n line.remove(line[0])\r\n for v in line:\r\n videos.append(int(v))\r\n caches.append({'id': id, 'videos': videos})\r\n\r\n score = 0\r\n print(inputfile)\r\n for end in tqdm(inputfile['endpoints'], desc='counting scores'):\r\n for req in end['requests']:\r\n variants = []\r\n variants.append(0)\r\n for c in caches:\r\n for v in c['videos']:\r\n if v == req['number_video']:\r\n for con in end['connections']:\r\n if con['id'] == c['id']:\r\n variants.append(con['lat']*req['count_requests'])\r\n variants.sort(reverse=1)\r\n score+=variants[0]\r\n print(\"Scores: \", score)\r\n",
"import argparse\nfrom tqdm import tqdm\n\n\ndef win(inputfile):\n for en in inputfile['endpoints']:\n for con in en['connections']:\n con['lat'] = en['data_lat'] - con['lat']\n return inputfile\n\n\ndef read_file(path):\n with open(path, 'r') as reader:\n (count_videos, count_endpoints, count_requests, count_caches,\n storage_cache) = [int(i) for i in reader.readline().split(' ')]\n video_sizes = [int(i) for i in reader.readline().split(' ')]\n endpoints = []\n for eid in tqdm(range(count_endpoints), desc='Reading endpoints'):\n data_lat, connected_caches = [int(i) for i in reader.readline()\n .split(' ')]\n connections = []\n for cnumb in range(connected_caches):\n number_cache, lat_cache = [int(i) for i in reader.readline(\n ).split(' ')]\n connections.append({'id': number_cache, 'lat': lat_cache})\n endpoints.append({'id': eid, 'data_lat': data_lat,\n 'connections': connections, 'requests': []})\n for rid in tqdm(range(count_requests), desc='Reading requests'):\n number_video, eid, requests = [int(i) for i in reader.readline(\n ).split(' ')]\n endpoints[eid]['requests'].append({'number_video': number_video,\n 'count_requests': requests})\n return {'count_videos': count_videos, 'count_endpoints':\n count_endpoints, 'count_requests': count_requests,\n 'count_caches': count_caches, 'storage_cache': storage_cache,\n 'video_sizes': video_sizes, 'endpoints': endpoints}\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('input', help='input file')\nparser.add_argument('output', help='output file')\nargs = parser.parse_args()\ninputfile = read_file(args.input)\ninputfile = win(inputfile)\nwith open(args.output, 'r') as reader:\n count_caches = int(reader.readline())\n caches = []\n for i in tqdm(range(count_caches), desc='read output'):\n line = reader.readline().split(' ')\n id = int(line[0])\n videos = []\n line.remove(line[0])\n for v in line:\n videos.append(int(v))\n caches.append({'id': id, 'videos': videos})\n score = 0\n print(inputfile)\n for end in tqdm(inputfile['endpoints'], desc='counting scores'):\n for req in end['requests']:\n variants = []\n variants.append(0)\n for c in caches:\n for v in c['videos']:\n if v == req['number_video']:\n for con in end['connections']:\n if con['id'] == c['id']:\n variants.append(con['lat'] * req[\n 'count_requests'])\n variants.sort(reverse=1)\n score += variants[0]\n print('Scores: ', score)\n",
"<import token>\n\n\ndef win(inputfile):\n for en in inputfile['endpoints']:\n for con in en['connections']:\n con['lat'] = en['data_lat'] - con['lat']\n return inputfile\n\n\ndef read_file(path):\n with open(path, 'r') as reader:\n (count_videos, count_endpoints, count_requests, count_caches,\n storage_cache) = [int(i) for i in reader.readline().split(' ')]\n video_sizes = [int(i) for i in reader.readline().split(' ')]\n endpoints = []\n for eid in tqdm(range(count_endpoints), desc='Reading endpoints'):\n data_lat, connected_caches = [int(i) for i in reader.readline()\n .split(' ')]\n connections = []\n for cnumb in range(connected_caches):\n number_cache, lat_cache = [int(i) for i in reader.readline(\n ).split(' ')]\n connections.append({'id': number_cache, 'lat': lat_cache})\n endpoints.append({'id': eid, 'data_lat': data_lat,\n 'connections': connections, 'requests': []})\n for rid in tqdm(range(count_requests), desc='Reading requests'):\n number_video, eid, requests = [int(i) for i in reader.readline(\n ).split(' ')]\n endpoints[eid]['requests'].append({'number_video': number_video,\n 'count_requests': requests})\n return {'count_videos': count_videos, 'count_endpoints':\n count_endpoints, 'count_requests': count_requests,\n 'count_caches': count_caches, 'storage_cache': storage_cache,\n 'video_sizes': video_sizes, 'endpoints': endpoints}\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('input', help='input file')\nparser.add_argument('output', help='output file')\nargs = parser.parse_args()\ninputfile = read_file(args.input)\ninputfile = win(inputfile)\nwith open(args.output, 'r') as reader:\n count_caches = int(reader.readline())\n caches = []\n for i in tqdm(range(count_caches), desc='read output'):\n line = reader.readline().split(' ')\n id = int(line[0])\n videos = []\n line.remove(line[0])\n for v in line:\n videos.append(int(v))\n caches.append({'id': id, 'videos': videos})\n score = 0\n print(inputfile)\n for end in tqdm(inputfile['endpoints'], desc='counting scores'):\n for req in end['requests']:\n variants = []\n variants.append(0)\n for c in caches:\n for v in c['videos']:\n if v == req['number_video']:\n for con in end['connections']:\n if con['id'] == c['id']:\n variants.append(con['lat'] * req[\n 'count_requests'])\n variants.sort(reverse=1)\n score += variants[0]\n print('Scores: ', score)\n",
"<import token>\n\n\ndef win(inputfile):\n for en in inputfile['endpoints']:\n for con in en['connections']:\n con['lat'] = en['data_lat'] - con['lat']\n return inputfile\n\n\ndef read_file(path):\n with open(path, 'r') as reader:\n (count_videos, count_endpoints, count_requests, count_caches,\n storage_cache) = [int(i) for i in reader.readline().split(' ')]\n video_sizes = [int(i) for i in reader.readline().split(' ')]\n endpoints = []\n for eid in tqdm(range(count_endpoints), desc='Reading endpoints'):\n data_lat, connected_caches = [int(i) for i in reader.readline()\n .split(' ')]\n connections = []\n for cnumb in range(connected_caches):\n number_cache, lat_cache = [int(i) for i in reader.readline(\n ).split(' ')]\n connections.append({'id': number_cache, 'lat': lat_cache})\n endpoints.append({'id': eid, 'data_lat': data_lat,\n 'connections': connections, 'requests': []})\n for rid in tqdm(range(count_requests), desc='Reading requests'):\n number_video, eid, requests = [int(i) for i in reader.readline(\n ).split(' ')]\n endpoints[eid]['requests'].append({'number_video': number_video,\n 'count_requests': requests})\n return {'count_videos': count_videos, 'count_endpoints':\n count_endpoints, 'count_requests': count_requests,\n 'count_caches': count_caches, 'storage_cache': storage_cache,\n 'video_sizes': video_sizes, 'endpoints': endpoints}\n\n\n<assignment token>\nparser.add_argument('input', help='input file')\nparser.add_argument('output', help='output file')\n<assignment token>\nwith open(args.output, 'r') as reader:\n count_caches = int(reader.readline())\n caches = []\n for i in tqdm(range(count_caches), desc='read output'):\n line = reader.readline().split(' ')\n id = int(line[0])\n videos = []\n line.remove(line[0])\n for v in line:\n videos.append(int(v))\n caches.append({'id': id, 'videos': videos})\n score = 0\n print(inputfile)\n for end in tqdm(inputfile['endpoints'], desc='counting scores'):\n for req in end['requests']:\n variants = []\n variants.append(0)\n for c in caches:\n for v in c['videos']:\n if v == req['number_video']:\n for con in end['connections']:\n if con['id'] == c['id']:\n variants.append(con['lat'] * req[\n 'count_requests'])\n variants.sort(reverse=1)\n score += variants[0]\n print('Scores: ', score)\n",
"<import token>\n\n\ndef win(inputfile):\n for en in inputfile['endpoints']:\n for con in en['connections']:\n con['lat'] = en['data_lat'] - con['lat']\n return inputfile\n\n\ndef read_file(path):\n with open(path, 'r') as reader:\n (count_videos, count_endpoints, count_requests, count_caches,\n storage_cache) = [int(i) for i in reader.readline().split(' ')]\n video_sizes = [int(i) for i in reader.readline().split(' ')]\n endpoints = []\n for eid in tqdm(range(count_endpoints), desc='Reading endpoints'):\n data_lat, connected_caches = [int(i) for i in reader.readline()\n .split(' ')]\n connections = []\n for cnumb in range(connected_caches):\n number_cache, lat_cache = [int(i) for i in reader.readline(\n ).split(' ')]\n connections.append({'id': number_cache, 'lat': lat_cache})\n endpoints.append({'id': eid, 'data_lat': data_lat,\n 'connections': connections, 'requests': []})\n for rid in tqdm(range(count_requests), desc='Reading requests'):\n number_video, eid, requests = [int(i) for i in reader.readline(\n ).split(' ')]\n endpoints[eid]['requests'].append({'number_video': number_video,\n 'count_requests': requests})\n return {'count_videos': count_videos, 'count_endpoints':\n count_endpoints, 'count_requests': count_requests,\n 'count_caches': count_caches, 'storage_cache': storage_cache,\n 'video_sizes': video_sizes, 'endpoints': endpoints}\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n\n\ndef win(inputfile):\n for en in inputfile['endpoints']:\n for con in en['connections']:\n con['lat'] = en['data_lat'] - con['lat']\n return inputfile\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
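For each request, the scorer above first rewrites every connection latency as a saving (win() stores data_lat - cache_lat), then takes the best saving over the caches that hold the requested video, weighted by the request count. A compact sketch of that per-request step with illustrative toy numbers:

endpoint = {'data_lat': 1000,
            'connections': [{'id': 0, 'lat': 100}, {'id': 1, 'lat': 300}]}
caches = {0: {2, 3}, 1: {1, 3}}   # cache id -> set of stored video ids
request = {'number_video': 3, 'count_requests': 1500}

savings = [endpoint['data_lat'] - con['lat']   # the win() rewrite
           for con in endpoint['connections']
           if request['number_video'] in caches[con['id']]]
score = max(savings, default=0) * request['count_requests']
print(score)   # best saving is 1000 - 100 = 900, so 900 * 1500 = 1350000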
99,550 |
279328791eb2d58225b1e06666dca8887d7ce16f
|
#!/usr/bin/env python3
## fszostak(2020) Python Code Snippets
#
# Split lines in blocks with limit
# max number of bytes
BLK_SIZE = 9  # max number of bytes per block
def process(string):
return string.upper()
def batch_process(string):
block = response = ""
offset = n = 0
line_size = string.find("\n")
while line_size > 0:
if offset+line_size > BLK_SIZE:
response = response + process(block)
block = ""
print(f'Block {n} processed')
n += 1
        # a line plus its trailing newline is line_size + 1 characters, not + 2
        block = block + string[offset:offset+line_size+1]
        offset = offset + line_size+1
line_size = string[offset:].find("\n")
block = block + string[offset:]
if block != "":
response = response + process(block)
print(f'Block {n} processed')
return response
# basic test
string = "01 blah blah\n02 blah blah\n03 blah blah blah\n04 blah blah\n05 blah blah\n06 blah blah blah\n07 blah blah\n08 blah blah\n09 blah blah blah\n10 blah blah\n11 blah blah\n12 blah blah blah\n"
print("\nTEST 1 - INPUT:")
print(string)
print("\n\nOUTPUT:")
print(batch_process(string))
# last line great than previous line
string = "01 blah blah\n02 blah blah\n03 blah blah\n04 blah blah\n05 blah blah\n06 blah blah\n07 blah blah\n08 blah blah\n09 blah blah\n10 blah blah\n11 blah blah\n12 blah blah blah\n"
print("\nTEST 2 - INPUT:")
print(string)
print("\n\nOUTPUT:")
print(batch_process(string))
# last line less than previous line
string = "01 blah blah\n02 blah blah\n03 blah blah\n04 blah blah\n05 blah blah\n06 blah blah\n07 blah blah\n08 blah blah\n09 blah blah\n10 blah blah\n11 blah blah\n12 blah\n"
print("\nTEST 3 - INPUT:")
print(string)
print("\n\nOUTPUT:")
print(batch_process(string))
# with \r\n
string = "01 blah blah\r\n02 blah blah\r\n03 blah blah\r\n04 blah blah\r\n05 blah blah\r\n06 blah blah\r\n07 blah blah\r\n08 blah blah\r\n09 blah blah\r\n10 blah blah\r\n11 blah blah\r\n12 blah blah blah\r\n"
print("\nTEST 4 - INPUT:")
print(string)
print("\n\nOUTPUT:")
print(batch_process(string))
# without newline in last line
string = "01 blah blah\r\n02 blah blah\r\n03 blah blah\r\n04 blah blah\r\n05 blah blah\r\n06 blah blah\r\n07 blah blah\r\n08 blah blah\r\n09 blah blah\r\n10 blah blah\r\n11 blah blah\r\n12 blah"
print("\nTEST 5 - INPUT:")
print(string)
print("\n\nOUTPUT:")
print(batch_process(string))
|
[
"#!/usr/bin/env python3\n\n## fszostak(2020) Python Code Snippets\n#\n# Split lines in blocks with limit\n# max number of bytes\n\nBLK_SIZE=9 # max number of bytes per block\n\ndef process(string):\n return string.upper()\n\ndef batch_process(string):\n \n block = response = \"\"\n offset = n = 0\n line_size = string.find(\"\\n\")\n\n while line_size > 0:\n\n if offset+line_size > BLK_SIZE:\n response = response + process(block)\n block = \"\"\n print(f'Block {n} processed')\n n += 1\n\n block = block + string[offset:offset+line_size+2]\n\n offset = offset + line_size+2\n line_size = string[offset:].find(\"\\n\") \n\n block = block + string[offset:]\n if block != \"\":\n response = response + process(block)\n print(f'Block {n} processed')\n\n return response\n\n\n\n# basic test\nstring = \"01 blah blah\\n02 blah blah\\n03 blah blah blah\\n04 blah blah\\n05 blah blah\\n06 blah blah blah\\n07 blah blah\\n08 blah blah\\n09 blah blah blah\\n10 blah blah\\n11 blah blah\\n12 blah blah blah\\n\"\nprint(\"\\nTEST 1 - INPUT:\")\nprint(string)\nprint(\"\\n\\nOUTPUT:\")\nprint(batch_process(string))\n\n# last line great than previous line\nstring = \"01 blah blah\\n02 blah blah\\n03 blah blah\\n04 blah blah\\n05 blah blah\\n06 blah blah\\n07 blah blah\\n08 blah blah\\n09 blah blah\\n10 blah blah\\n11 blah blah\\n12 blah blah blah\\n\"\nprint(\"\\nTEST 2 - INPUT:\")\nprint(string)\nprint(\"\\n\\nOUTPUT:\")\nprint(batch_process(string))\n\n# last line less than previous line\nstring = \"01 blah blah\\n02 blah blah\\n03 blah blah\\n04 blah blah\\n05 blah blah\\n06 blah blah\\n07 blah blah\\n08 blah blah\\n09 blah blah\\n10 blah blah\\n11 blah blah\\n12 blah\\n\"\nprint(\"\\nTEST 3 - INPUT:\")\nprint(string)\nprint(\"\\n\\nOUTPUT:\")\nprint(batch_process(string))\n\n# with \\r\\n\nstring = \"01 blah blah\\r\\n02 blah blah\\r\\n03 blah blah\\r\\n04 blah blah\\r\\n05 blah blah\\r\\n06 blah blah\\r\\n07 blah blah\\r\\n08 blah blah\\r\\n09 blah blah\\r\\n10 blah blah\\r\\n11 blah blah\\r\\n12 blah blah blah\\r\\n\"\nprint(\"\\nTEST 4 - INPUT:\")\nprint(string)\nprint(\"\\n\\nOUTPUT:\")\nprint(batch_process(string))\n\n# without newline in last line\nstring = \"01 blah blah\\r\\n02 blah blah\\r\\n03 blah blah\\r\\n04 blah blah\\r\\n05 blah blah\\r\\n06 blah blah\\r\\n07 blah blah\\r\\n08 blah blah\\r\\n09 blah blah\\r\\n10 blah blah\\r\\n11 blah blah\\r\\n12 blah\"\nprint(\"\\nTEST 5 - INPUT:\")\nprint(string)\nprint(\"\\n\\nOUTPUT:\")\nprint(batch_process(string))\n\n\n\n\n\n\n\n\n\n\n\n",
"BLK_SIZE = 9\n\n\ndef process(string):\n return string.upper()\n\n\ndef batch_process(string):\n block = response = ''\n offset = n = 0\n line_size = string.find('\\n')\n while line_size > 0:\n if offset + line_size > BLK_SIZE:\n response = response + process(block)\n block = ''\n print(f'Block {n} processed')\n n += 1\n block = block + string[offset:offset + line_size + 2]\n offset = offset + line_size + 2\n line_size = string[offset:].find('\\n')\n block = block + string[offset:]\n if block != '':\n response = response + process(block)\n print(f'Block {n} processed')\n return response\n\n\nstring = \"\"\"01 blah blah\n02 blah blah\n03 blah blah blah\n04 blah blah\n05 blah blah\n06 blah blah blah\n07 blah blah\n08 blah blah\n09 blah blah blah\n10 blah blah\n11 blah blah\n12 blah blah blah\n\"\"\"\nprint('\\nTEST 1 - INPUT:')\nprint(string)\nprint('\\n\\nOUTPUT:')\nprint(batch_process(string))\nstring = \"\"\"01 blah blah\n02 blah blah\n03 blah blah\n04 blah blah\n05 blah blah\n06 blah blah\n07 blah blah\n08 blah blah\n09 blah blah\n10 blah blah\n11 blah blah\n12 blah blah blah\n\"\"\"\nprint('\\nTEST 2 - INPUT:')\nprint(string)\nprint('\\n\\nOUTPUT:')\nprint(batch_process(string))\nstring = \"\"\"01 blah blah\n02 blah blah\n03 blah blah\n04 blah blah\n05 blah blah\n06 blah blah\n07 blah blah\n08 blah blah\n09 blah blah\n10 blah blah\n11 blah blah\n12 blah\n\"\"\"\nprint('\\nTEST 3 - INPUT:')\nprint(string)\nprint('\\n\\nOUTPUT:')\nprint(batch_process(string))\nstring = (\n '01 blah blah\\r\\n02 blah blah\\r\\n03 blah blah\\r\\n04 blah blah\\r\\n05 blah blah\\r\\n06 blah blah\\r\\n07 blah blah\\r\\n08 blah blah\\r\\n09 blah blah\\r\\n10 blah blah\\r\\n11 blah blah\\r\\n12 blah blah blah\\r\\n'\n )\nprint('\\nTEST 4 - INPUT:')\nprint(string)\nprint('\\n\\nOUTPUT:')\nprint(batch_process(string))\nstring = (\n '01 blah blah\\r\\n02 blah blah\\r\\n03 blah blah\\r\\n04 blah blah\\r\\n05 blah blah\\r\\n06 blah blah\\r\\n07 blah blah\\r\\n08 blah blah\\r\\n09 blah blah\\r\\n10 blah blah\\r\\n11 blah blah\\r\\n12 blah'\n )\nprint('\\nTEST 5 - INPUT:')\nprint(string)\nprint('\\n\\nOUTPUT:')\nprint(batch_process(string))\n",
"<assignment token>\n\n\ndef process(string):\n return string.upper()\n\n\ndef batch_process(string):\n block = response = ''\n offset = n = 0\n line_size = string.find('\\n')\n while line_size > 0:\n if offset + line_size > BLK_SIZE:\n response = response + process(block)\n block = ''\n print(f'Block {n} processed')\n n += 1\n block = block + string[offset:offset + line_size + 2]\n offset = offset + line_size + 2\n line_size = string[offset:].find('\\n')\n block = block + string[offset:]\n if block != '':\n response = response + process(block)\n print(f'Block {n} processed')\n return response\n\n\n<assignment token>\nprint('\\nTEST 1 - INPUT:')\nprint(string)\nprint('\\n\\nOUTPUT:')\nprint(batch_process(string))\n<assignment token>\nprint('\\nTEST 2 - INPUT:')\nprint(string)\nprint('\\n\\nOUTPUT:')\nprint(batch_process(string))\n<assignment token>\nprint('\\nTEST 3 - INPUT:')\nprint(string)\nprint('\\n\\nOUTPUT:')\nprint(batch_process(string))\n<assignment token>\nprint('\\nTEST 4 - INPUT:')\nprint(string)\nprint('\\n\\nOUTPUT:')\nprint(batch_process(string))\n<assignment token>\nprint('\\nTEST 5 - INPUT:')\nprint(string)\nprint('\\n\\nOUTPUT:')\nprint(batch_process(string))\n",
"<assignment token>\n\n\ndef process(string):\n return string.upper()\n\n\ndef batch_process(string):\n block = response = ''\n offset = n = 0\n line_size = string.find('\\n')\n while line_size > 0:\n if offset + line_size > BLK_SIZE:\n response = response + process(block)\n block = ''\n print(f'Block {n} processed')\n n += 1\n block = block + string[offset:offset + line_size + 2]\n offset = offset + line_size + 2\n line_size = string[offset:].find('\\n')\n block = block + string[offset:]\n if block != '':\n response = response + process(block)\n print(f'Block {n} processed')\n return response\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<assignment token>\n\n\ndef process(string):\n return string.upper()\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<assignment token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
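batch_process above walks byte offsets by hand; an alternative sketch that groups whole lines with splitlines(keepends=True) and flushes just before a block would overflow (block boundaries can differ slightly from the original):

def batch_process_lines(string, blk_size=9):
    response, block = "", ""
    for line in string.splitlines(keepends=True):
        if block and len(block) + len(line) > blk_size:
            response += process(block)   # process() as defined in the record
            block = ""
        block += line
    if block:
        response += process(block)
    return response

assert batch_process_lines("aa\nbb\ncc\n") == "AA\nBB\nCC\n"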
99,551 |
5e6111cf973b8a68de971b8bf7d830433f7e6eeb
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Writer: Xinqiao Wang
Organization: Global Energy Interconnection Research Institute, SGCC
Date: 20210317
Objective: Import a PaddlePaddle machine learning model (usually a vision model)
           and report an evaluation of the model.
'''
from collections import OrderedDict
from prettytable import PrettyTable
from numpy import prod
def summary(main_prog, batch_size, bits_per_tensor):
'''
    It summarizes the model's PARAMs and FLOPs so far.
    It supports common operators like conv, fc, pool, relu, sigmoid, bn, etc.
Args:
main_prog: main program
Returns:
print summary on terminal
'''
blocks = main_prog.blocks
collected_ops_list = []
unsupported = set()
block_vars = {}
for block in blocks:
block_vars = {**block_vars, **block.vars}
block_ops = [ele for block in blocks for ele in block.ops]
    # block_var: learnable variable, block_op: operator
    # merge all blocks (ops and vars do not correspond strictly, so merge them to make every var searchable)
for one_op in block_ops:
op_info = OrderedDict()
spf_res = _summary_model(block_vars, one_op)
if spf_res is None:
continue
if type(spf_res) == str:
unsupported.add(one_op.type)
continue
# TODO: get the operator name
op_info['type'] = one_op.type
op_info['input_shape'] = spf_res[0][1:]
op_info['out_shape'] = spf_res[1][1:]
op_info['PARAMs'] = spf_res[2]
op_info['FLOPs'] = spf_res[3]
collected_ops_list.append(op_info)
summary_table, total = _format_summary(collected_ops_list, batch_size, bits_per_tensor)
_print_summary(summary_table, total, unsupported)
def _summary_model(block_vars, one_op):
'''
Compute operator's params and flops.
Args:
block_vars: all vars of one block
one_op: one operator to count
Returns:
in_data_shape: one operator's input data shape
out_data_shape: one operator's output data shape
params: one operator's PARAMs
        flops: one operator's FLOPs
'''
if one_op.type in ['conv2d', 'depthwise_conv2d']:
k_arg_shape = block_vars[one_op.input("Filter")[0]].shape
in_data_shape = block_vars[one_op.input("Input")[0]].shape
out_data_shape = block_vars[one_op.output("Output")[0]].shape
c_out, c_in, k_h, k_w = k_arg_shape
_, c_out_, h_out, w_out = out_data_shape
assert c_out == c_out_, 'shape error!'
k_groups = one_op.attr("groups")
kernel_ops = k_h * k_w * (c_in / k_groups)
bias_ops = 0 if one_op.input("Bias") == [] else 1
params = c_out * (kernel_ops + bias_ops)
flops = h_out * w_out * c_out * (kernel_ops + bias_ops)
        # following the NVIDIA paper's convention, count both multiply and add
flops = 2 * flops
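        # worked example (illustrative numbers): a 3x3 conv with c_in=64,
        # c_out=128, groups=1, bias, and a 56x56 output map gives
        #   kernel_ops = 3*3*64 = 576
        #   params     = 128 * (576 + 1) = 73,856
        #   flops      = 2 * 56*56*128 * (576 + 1) = 463,224,832 (~0.46 GFLOPs)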
elif one_op.type == 'pool2d':
in_data_shape = block_vars[one_op.input("X")[0]].shape
out_data_shape = block_vars[one_op.output("Out")[0]].shape
_, c_out, h_out, w_out = out_data_shape
k_size = one_op.attr("ksize")
params = 0
flops = h_out * w_out * c_out * (k_size[0] * k_size[1])
elif one_op.type == 'mul':
k_arg_shape = block_vars[one_op.input("Y")[0]].shape
in_data_shape = block_vars[one_op.input("X")[0]].shape
out_data_shape = block_vars[one_op.output("Out")[0]].shape
# TODO: fc has mul ops
# add attr to mul op, tell us whether it belongs to 'fc'
# this's not the best way
if 'fc' not in one_op.output("Out")[0]:
return None
k_in, k_out = k_arg_shape
# bias in sum op
params = k_in * k_out + 1
flops = k_in * k_out
elif one_op.type in ['sigmoid', 'tanh', 'relu', 'leaky_relu', 'prelu']:
# elif one_op.type in ['sigmoid', 'tanh', 'relu', 'leaky_relu', 'prelu', 'elementwise_add', 'elementwise_mul', 'elementwise_div']:
in_data_shape = block_vars[one_op.input("X")[0]].shape
out_data_shape = block_vars[one_op.output("Out")[0]].shape
params = 0
if one_op.type == 'prelu':
params = 1
flops = 1
for one_dim in in_data_shape:
if one_dim != -1:
                # only count dims that are not the dynamic placeholder (-1)
flops *= one_dim
elif one_op.type == 'batch_norm':
in_data_shape = block_vars[one_op.input("X")[0]].shape
out_data_shape = block_vars[one_op.output("Y")[0]].shape
_, c_in, h_out, w_out = in_data_shape
# gamma, beta
params = c_in * 2
# compute mean and std
flops = h_out * w_out * c_in * 2
else:
        # op types that were not counted are added to the unsupported set
        # some ops, e.g. affine_channel, are pure affine transforms and are not counted
return one_op.type
return in_data_shape, out_data_shape, params, flops
def _format_summary(collected_ops_list, batch_size, bits_per_tensor):
'''
Format summary report.
Args:
collected_ops_list: the collected operator with summary
Returns:
summary_table: summary report format
total: sum param and flops
'''
summary_table = PrettyTable(
["No.", "TYPE", "INPUT", "OUTPUT", "PARAMs", "FLOPs"])
summary_table.align = 'r'
total = {}
total_params = []
total_flops = []
total_outshape = []
for i, one_op in enumerate(collected_ops_list):
# notice the order
table_row = [
i,
one_op['type'],
one_op['input_shape'],
one_op['out_shape'],
int(one_op['PARAMs']),
int(one_op['FLOPs']),
]
if i == 0:
input_shape = one_op['input_shape']
summary_table.add_row(table_row)
total_params.append(int(one_op['PARAMs']))
total_flops.append(int(one_op['FLOPs']))
total_outshape.append(one_op['out_shape'])
total['params'] = total_params
total['flops'] = total_flops
total['out'] = total_outshape
total['gpu'] = cal_gpu_memory(total, input_shape, batch_size, bits_per_tensor)
return summary_table, total
def cal_gpu_memory(total, input_shape, batch_size, bits_per_tensor):
gpu_input = prod(input_shape)
    gpu_param = sum(total['params'])  # total['params'] is a per-op list; collapse it to a scalar
gpu_backward_forward = sum([prod(ele) for ele in total['out']])
    gpu = (gpu_input + gpu_param + gpu_backward_forward) * (batch_size * bits_per_tensor / 8)  # count in bytes
return gpu
def _print_summary(summary_table, total, unsupported):
'''
Print all the summary on terminal.
Args:
summary_table: summary report format
total: sum param and flops
'''
    params = total['params']
flops = total['flops']
gpu = total['gpu']
print(
"Notice: \n now supported ops include [Conv, DepthwiseConv, FC(mul), BatchNorm, Pool, Activation(sigmoid, tanh, relu, leaky_relu, prelu), Elementwise operations]"
)
print("Unsupported operator types:", unsupported)
print(summary_table)
    print('Total PARAMs: {}({:.4f}M)'.format(
        sum(params), sum(params) / (10**6)))
print('Total FLOPs: {}({:.2f}G)'.format(sum(flops), sum(flops) / 10**9))
    print('GPU Memory Usage: {}({:.2f}GB)'.format(gpu, gpu / 10**9))  # gpu is already a scalar
|
[
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n'''\nWriter: Xinqiao Wang\nOrganization: Global Energy Interconnection Research Institute, SGCC\nDate: 20210317\nObjective: Import PaddlePaddle machine learning model (usually vision model)\n then give the evaluation of model back.\n'''\nfrom collections import OrderedDict\nfrom prettytable import PrettyTable\nfrom numpy import prod\n\ndef summary(main_prog, batch_size, bits_per_tensor):\n '''\n It can summary model's PARAMS, FLOPs until now.\n It support common operator like conv, fc, pool, relu, sigmoid, bn etc. \n Args:\n main_prog: main program \n Returns:\n print summary on terminal\n '''\n blocks = main_prog.blocks\n collected_ops_list = []\n unsupported = set()\n block_vars = {}\n for block in blocks:\n block_vars = {**block_vars, **block.vars}\n block_ops = [ele for block in blocks for ele in block.ops]\n # block_var: learnable variable,block_op:operator\n # 合并blocks(ops和vars并不严格对应,需要合并保证能搜索到所有)\n for one_op in block_ops:\n op_info = OrderedDict()\n spf_res = _summary_model(block_vars, one_op)\n if spf_res is None:\n continue\n if type(spf_res) == str:\n unsupported.add(one_op.type)\n continue\n # TODO: get the operator name\n op_info['type'] = one_op.type\n op_info['input_shape'] = spf_res[0][1:]\n op_info['out_shape'] = spf_res[1][1:]\n op_info['PARAMs'] = spf_res[2]\n op_info['FLOPs'] = spf_res[3]\n collected_ops_list.append(op_info)\n summary_table, total = _format_summary(collected_ops_list, batch_size, bits_per_tensor)\n _print_summary(summary_table, total, unsupported)\n\n\ndef _summary_model(block_vars, one_op):\n '''\n Compute operator's params and flops.\n Args:\n block_vars: all vars of one block\n one_op: one operator to count\n Returns:\n in_data_shape: one operator's input data shape\n out_data_shape: one operator's output data shape\n params: one operator's PARAMs \n flops: : one operator's FLOPs\n '''\n if one_op.type in ['conv2d', 'depthwise_conv2d']:\n k_arg_shape = block_vars[one_op.input(\"Filter\")[0]].shape\n in_data_shape = block_vars[one_op.input(\"Input\")[0]].shape\n out_data_shape = block_vars[one_op.output(\"Output\")[0]].shape\n c_out, c_in, k_h, k_w = k_arg_shape\n _, c_out_, h_out, w_out = out_data_shape\n assert c_out == c_out_, 'shape error!'\n k_groups = one_op.attr(\"groups\")\n kernel_ops = k_h * k_w * (c_in / k_groups)\n bias_ops = 0 if one_op.input(\"Bias\") == [] else 1\n params = c_out * (kernel_ops + bias_ops)\n flops = h_out * w_out * c_out * (kernel_ops + bias_ops)\n # base nvidia paper, include mul and add\n flops = 2 * flops\n\n elif one_op.type == 'pool2d':\n in_data_shape = block_vars[one_op.input(\"X\")[0]].shape\n out_data_shape = block_vars[one_op.output(\"Out\")[0]].shape\n _, c_out, h_out, w_out = out_data_shape\n k_size = one_op.attr(\"ksize\")\n params = 0\n flops = h_out * w_out * c_out * (k_size[0] * k_size[1])\n\n elif one_op.type == 'mul':\n k_arg_shape = block_vars[one_op.input(\"Y\")[0]].shape\n in_data_shape = 
block_vars[one_op.input(\"X\")[0]].shape\n out_data_shape = block_vars[one_op.output(\"Out\")[0]].shape\n # TODO: fc has mul ops\n # add attr to mul op, tell us whether it belongs to 'fc'\n # this's not the best way\n if 'fc' not in one_op.output(\"Out\")[0]:\n return None\n k_in, k_out = k_arg_shape\n # bias in sum op\n params = k_in * k_out + 1\n flops = k_in * k_out\n\n elif one_op.type in ['sigmoid', 'tanh', 'relu', 'leaky_relu', 'prelu']:\n # elif one_op.type in ['sigmoid', 'tanh', 'relu', 'leaky_relu', 'prelu', 'elementwise_add', 'elementwise_mul', 'elementwise_div']:\n in_data_shape = block_vars[one_op.input(\"X\")[0]].shape\n out_data_shape = block_vars[one_op.output(\"Out\")[0]].shape\n params = 0\n if one_op.type == 'prelu':\n params = 1\n flops = 1\n for one_dim in in_data_shape:\n if one_dim != -1:\n # 如果不为-1\n flops *= one_dim\n\n elif one_op.type == 'batch_norm':\n in_data_shape = block_vars[one_op.input(\"X\")[0]].shape\n out_data_shape = block_vars[one_op.output(\"Y\")[0]].shape\n _, c_in, h_out, w_out = in_data_shape\n # gamma, beta\n params = c_in * 2\n # compute mean and std\n flops = h_out * w_out * c_in * 2\n\n else:\n # 有些没有被计算到的type,加入unsupported_set中\n # 某些操作,比如affine_channel,仅是仿射变换,不计入\n return one_op.type\n\n return in_data_shape, out_data_shape, params, flops\n\n\ndef _format_summary(collected_ops_list, batch_size, bits_per_tensor):\n '''\n Format summary report.\n Args:\n collected_ops_list: the collected operator with summary\n Returns:\n summary_table: summary report format\n total: sum param and flops\n '''\n\n summary_table = PrettyTable(\n [\"No.\", \"TYPE\", \"INPUT\", \"OUTPUT\", \"PARAMs\", \"FLOPs\"])\n summary_table.align = 'r'\n\n total = {}\n total_params = []\n total_flops = []\n total_outshape = []\n for i, one_op in enumerate(collected_ops_list):\n # notice the order\n table_row = [\n i,\n one_op['type'],\n one_op['input_shape'],\n one_op['out_shape'],\n int(one_op['PARAMs']),\n int(one_op['FLOPs']),\n ]\n if i == 0:\n input_shape = one_op['input_shape']\n summary_table.add_row(table_row)\n total_params.append(int(one_op['PARAMs']))\n total_flops.append(int(one_op['FLOPs']))\n total_outshape.append(one_op['out_shape'])\n\n total['params'] = total_params\n total['flops'] = total_flops\n total['out'] = total_outshape\n total['gpu'] = cal_gpu_memory(total, input_shape, batch_size, bits_per_tensor)\n return summary_table, total\n\ndef cal_gpu_memory(total, input_shape, batch_size, bits_per_tensor):\n gpu_input = prod(input_shape)\n gpu_param = total['params']\n gpu_backward_forward = sum([prod(ele) for ele in total['out']])\n gpu = (gpu_input + gpu_param + gpu_backward_forward)*(batch_size*bits_per_tensor/8) # bytes计数\n return gpu\n\n\ndef _print_summary(summary_table, total, unsupported):\n '''\n Print all the summary on terminal.\n Args:\n summary_table: summary report format\n total: sum param and flops\n '''\n parmas = total['params']\n flops = total['flops']\n gpu = total['gpu']\n print(\n \"Notice: \\n now supported ops include [Conv, DepthwiseConv, FC(mul), BatchNorm, Pool, Activation(sigmoid, tanh, relu, leaky_relu, prelu), Elementwise operations]\"\n )\n print(\"Unsupported operator types:\", unsupported)\n print(summary_table)\n print('Total PARAMs: {}({:.4f}M)'.format(\n sum(parmas), sum(parmas) / (10**6)))\n print('Total FLOPs: {}({:.2f}G)'.format(sum(flops), sum(flops) / 10**9))\n print('GPU Memory Usage: {}({:.2f}GB)'.format(sum(gpu), sum(gpu) / 10**9))\n",
"<docstring token>\nfrom collections import OrderedDict\nfrom prettytable import PrettyTable\nfrom numpy import prod\n\n\ndef summary(main_prog, batch_size, bits_per_tensor):\n \"\"\"\n It can summary model's PARAMS, FLOPs until now.\n It support common operator like conv, fc, pool, relu, sigmoid, bn etc. \n Args:\n main_prog: main program \n Returns:\n print summary on terminal\n \"\"\"\n blocks = main_prog.blocks\n collected_ops_list = []\n unsupported = set()\n block_vars = {}\n for block in blocks:\n block_vars = {**block_vars, **block.vars}\n block_ops = [ele for block in blocks for ele in block.ops]\n for one_op in block_ops:\n op_info = OrderedDict()\n spf_res = _summary_model(block_vars, one_op)\n if spf_res is None:\n continue\n if type(spf_res) == str:\n unsupported.add(one_op.type)\n continue\n op_info['type'] = one_op.type\n op_info['input_shape'] = spf_res[0][1:]\n op_info['out_shape'] = spf_res[1][1:]\n op_info['PARAMs'] = spf_res[2]\n op_info['FLOPs'] = spf_res[3]\n collected_ops_list.append(op_info)\n summary_table, total = _format_summary(collected_ops_list, batch_size,\n bits_per_tensor)\n _print_summary(summary_table, total, unsupported)\n\n\ndef _summary_model(block_vars, one_op):\n \"\"\"\n Compute operator's params and flops.\n Args:\n block_vars: all vars of one block\n one_op: one operator to count\n Returns:\n in_data_shape: one operator's input data shape\n out_data_shape: one operator's output data shape\n params: one operator's PARAMs \n flops: : one operator's FLOPs\n \"\"\"\n if one_op.type in ['conv2d', 'depthwise_conv2d']:\n k_arg_shape = block_vars[one_op.input('Filter')[0]].shape\n in_data_shape = block_vars[one_op.input('Input')[0]].shape\n out_data_shape = block_vars[one_op.output('Output')[0]].shape\n c_out, c_in, k_h, k_w = k_arg_shape\n _, c_out_, h_out, w_out = out_data_shape\n assert c_out == c_out_, 'shape error!'\n k_groups = one_op.attr('groups')\n kernel_ops = k_h * k_w * (c_in / k_groups)\n bias_ops = 0 if one_op.input('Bias') == [] else 1\n params = c_out * (kernel_ops + bias_ops)\n flops = h_out * w_out * c_out * (kernel_ops + bias_ops)\n flops = 2 * flops\n elif one_op.type == 'pool2d':\n in_data_shape = block_vars[one_op.input('X')[0]].shape\n out_data_shape = block_vars[one_op.output('Out')[0]].shape\n _, c_out, h_out, w_out = out_data_shape\n k_size = one_op.attr('ksize')\n params = 0\n flops = h_out * w_out * c_out * (k_size[0] * k_size[1])\n elif one_op.type == 'mul':\n k_arg_shape = block_vars[one_op.input('Y')[0]].shape\n in_data_shape = block_vars[one_op.input('X')[0]].shape\n out_data_shape = block_vars[one_op.output('Out')[0]].shape\n if 'fc' not in one_op.output('Out')[0]:\n return None\n k_in, k_out = k_arg_shape\n params = k_in * k_out + 1\n flops = k_in * k_out\n elif one_op.type in ['sigmoid', 'tanh', 'relu', 'leaky_relu', 'prelu']:\n in_data_shape = block_vars[one_op.input('X')[0]].shape\n out_data_shape = block_vars[one_op.output('Out')[0]].shape\n params = 0\n if one_op.type == 'prelu':\n params = 1\n flops = 1\n for one_dim in in_data_shape:\n if one_dim != -1:\n flops *= one_dim\n elif one_op.type == 'batch_norm':\n in_data_shape = block_vars[one_op.input('X')[0]].shape\n out_data_shape = block_vars[one_op.output('Y')[0]].shape\n _, c_in, h_out, w_out = in_data_shape\n params = c_in * 2\n flops = h_out * w_out * c_in * 2\n else:\n return one_op.type\n return in_data_shape, out_data_shape, params, flops\n\n\ndef _format_summary(collected_ops_list, batch_size, bits_per_tensor):\n \"\"\"\n Format summary report.\n Args:\n 
collected_ops_list: the collected operator with summary\n Returns:\n summary_table: summary report format\n total: sum param and flops\n \"\"\"\n summary_table = PrettyTable(['No.', 'TYPE', 'INPUT', 'OUTPUT', 'PARAMs',\n 'FLOPs'])\n summary_table.align = 'r'\n total = {}\n total_params = []\n total_flops = []\n total_outshape = []\n for i, one_op in enumerate(collected_ops_list):\n table_row = [i, one_op['type'], one_op['input_shape'], one_op[\n 'out_shape'], int(one_op['PARAMs']), int(one_op['FLOPs'])]\n if i == 0:\n input_shape = one_op['input_shape']\n summary_table.add_row(table_row)\n total_params.append(int(one_op['PARAMs']))\n total_flops.append(int(one_op['FLOPs']))\n total_outshape.append(one_op['out_shape'])\n total['params'] = total_params\n total['flops'] = total_flops\n total['out'] = total_outshape\n total['gpu'] = cal_gpu_memory(total, input_shape, batch_size,\n bits_per_tensor)\n return summary_table, total\n\n\ndef cal_gpu_memory(total, input_shape, batch_size, bits_per_tensor):\n gpu_input = prod(input_shape)\n gpu_param = total['params']\n gpu_backward_forward = sum([prod(ele) for ele in total['out']])\n gpu = (gpu_input + gpu_param + gpu_backward_forward) * (batch_size *\n bits_per_tensor / 8)\n return gpu\n\n\ndef _print_summary(summary_table, total, unsupported):\n \"\"\"\n Print all the summary on terminal.\n Args:\n summary_table: summary report format\n total: sum param and flops\n \"\"\"\n parmas = total['params']\n flops = total['flops']\n gpu = total['gpu']\n print(\n \"\"\"Notice: \n now supported ops include [Conv, DepthwiseConv, FC(mul), BatchNorm, Pool, Activation(sigmoid, tanh, relu, leaky_relu, prelu), Elementwise operations]\"\"\"\n )\n print('Unsupported operator types:', unsupported)\n print(summary_table)\n print('Total PARAMs: {}({:.4f}M)'.format(sum(parmas), sum(parmas) / 10 **\n 6))\n print('Total FLOPs: {}({:.2f}G)'.format(sum(flops), sum(flops) / 10 ** 9))\n print('GPU Memory Usage: {}({:.2f}GB)'.format(sum(gpu), sum(gpu) / 10 ** 9)\n )\n",
"<docstring token>\n<import token>\n\n\ndef summary(main_prog, batch_size, bits_per_tensor):\n \"\"\"\n It can summary model's PARAMS, FLOPs until now.\n It support common operator like conv, fc, pool, relu, sigmoid, bn etc. \n Args:\n main_prog: main program \n Returns:\n print summary on terminal\n \"\"\"\n blocks = main_prog.blocks\n collected_ops_list = []\n unsupported = set()\n block_vars = {}\n for block in blocks:\n block_vars = {**block_vars, **block.vars}\n block_ops = [ele for block in blocks for ele in block.ops]\n for one_op in block_ops:\n op_info = OrderedDict()\n spf_res = _summary_model(block_vars, one_op)\n if spf_res is None:\n continue\n if type(spf_res) == str:\n unsupported.add(one_op.type)\n continue\n op_info['type'] = one_op.type\n op_info['input_shape'] = spf_res[0][1:]\n op_info['out_shape'] = spf_res[1][1:]\n op_info['PARAMs'] = spf_res[2]\n op_info['FLOPs'] = spf_res[3]\n collected_ops_list.append(op_info)\n summary_table, total = _format_summary(collected_ops_list, batch_size,\n bits_per_tensor)\n _print_summary(summary_table, total, unsupported)\n\n\ndef _summary_model(block_vars, one_op):\n \"\"\"\n Compute operator's params and flops.\n Args:\n block_vars: all vars of one block\n one_op: one operator to count\n Returns:\n in_data_shape: one operator's input data shape\n out_data_shape: one operator's output data shape\n params: one operator's PARAMs \n flops: : one operator's FLOPs\n \"\"\"\n if one_op.type in ['conv2d', 'depthwise_conv2d']:\n k_arg_shape = block_vars[one_op.input('Filter')[0]].shape\n in_data_shape = block_vars[one_op.input('Input')[0]].shape\n out_data_shape = block_vars[one_op.output('Output')[0]].shape\n c_out, c_in, k_h, k_w = k_arg_shape\n _, c_out_, h_out, w_out = out_data_shape\n assert c_out == c_out_, 'shape error!'\n k_groups = one_op.attr('groups')\n kernel_ops = k_h * k_w * (c_in / k_groups)\n bias_ops = 0 if one_op.input('Bias') == [] else 1\n params = c_out * (kernel_ops + bias_ops)\n flops = h_out * w_out * c_out * (kernel_ops + bias_ops)\n flops = 2 * flops\n elif one_op.type == 'pool2d':\n in_data_shape = block_vars[one_op.input('X')[0]].shape\n out_data_shape = block_vars[one_op.output('Out')[0]].shape\n _, c_out, h_out, w_out = out_data_shape\n k_size = one_op.attr('ksize')\n params = 0\n flops = h_out * w_out * c_out * (k_size[0] * k_size[1])\n elif one_op.type == 'mul':\n k_arg_shape = block_vars[one_op.input('Y')[0]].shape\n in_data_shape = block_vars[one_op.input('X')[0]].shape\n out_data_shape = block_vars[one_op.output('Out')[0]].shape\n if 'fc' not in one_op.output('Out')[0]:\n return None\n k_in, k_out = k_arg_shape\n params = k_in * k_out + 1\n flops = k_in * k_out\n elif one_op.type in ['sigmoid', 'tanh', 'relu', 'leaky_relu', 'prelu']:\n in_data_shape = block_vars[one_op.input('X')[0]].shape\n out_data_shape = block_vars[one_op.output('Out')[0]].shape\n params = 0\n if one_op.type == 'prelu':\n params = 1\n flops = 1\n for one_dim in in_data_shape:\n if one_dim != -1:\n flops *= one_dim\n elif one_op.type == 'batch_norm':\n in_data_shape = block_vars[one_op.input('X')[0]].shape\n out_data_shape = block_vars[one_op.output('Y')[0]].shape\n _, c_in, h_out, w_out = in_data_shape\n params = c_in * 2\n flops = h_out * w_out * c_in * 2\n else:\n return one_op.type\n return in_data_shape, out_data_shape, params, flops\n\n\ndef _format_summary(collected_ops_list, batch_size, bits_per_tensor):\n \"\"\"\n Format summary report.\n Args:\n collected_ops_list: the collected operator with summary\n Returns:\n summary_table: 
summary report format\n total: sum param and flops\n \"\"\"\n summary_table = PrettyTable(['No.', 'TYPE', 'INPUT', 'OUTPUT', 'PARAMs',\n 'FLOPs'])\n summary_table.align = 'r'\n total = {}\n total_params = []\n total_flops = []\n total_outshape = []\n for i, one_op in enumerate(collected_ops_list):\n table_row = [i, one_op['type'], one_op['input_shape'], one_op[\n 'out_shape'], int(one_op['PARAMs']), int(one_op['FLOPs'])]\n if i == 0:\n input_shape = one_op['input_shape']\n summary_table.add_row(table_row)\n total_params.append(int(one_op['PARAMs']))\n total_flops.append(int(one_op['FLOPs']))\n total_outshape.append(one_op['out_shape'])\n total['params'] = total_params\n total['flops'] = total_flops\n total['out'] = total_outshape\n total['gpu'] = cal_gpu_memory(total, input_shape, batch_size,\n bits_per_tensor)\n return summary_table, total\n\n\ndef cal_gpu_memory(total, input_shape, batch_size, bits_per_tensor):\n gpu_input = prod(input_shape)\n gpu_param = total['params']\n gpu_backward_forward = sum([prod(ele) for ele in total['out']])\n gpu = (gpu_input + gpu_param + gpu_backward_forward) * (batch_size *\n bits_per_tensor / 8)\n return gpu\n\n\ndef _print_summary(summary_table, total, unsupported):\n \"\"\"\n Print all the summary on terminal.\n Args:\n summary_table: summary report format\n total: sum param and flops\n \"\"\"\n parmas = total['params']\n flops = total['flops']\n gpu = total['gpu']\n print(\n \"\"\"Notice: \n now supported ops include [Conv, DepthwiseConv, FC(mul), BatchNorm, Pool, Activation(sigmoid, tanh, relu, leaky_relu, prelu), Elementwise operations]\"\"\"\n )\n print('Unsupported operator types:', unsupported)\n print(summary_table)\n print('Total PARAMs: {}({:.4f}M)'.format(sum(parmas), sum(parmas) / 10 **\n 6))\n print('Total FLOPs: {}({:.2f}G)'.format(sum(flops), sum(flops) / 10 ** 9))\n print('GPU Memory Usage: {}({:.2f}GB)'.format(sum(gpu), sum(gpu) / 10 ** 9)\n )\n",
"<docstring token>\n<import token>\n\n\ndef summary(main_prog, batch_size, bits_per_tensor):\n \"\"\"\n It can summary model's PARAMS, FLOPs until now.\n It support common operator like conv, fc, pool, relu, sigmoid, bn etc. \n Args:\n main_prog: main program \n Returns:\n print summary on terminal\n \"\"\"\n blocks = main_prog.blocks\n collected_ops_list = []\n unsupported = set()\n block_vars = {}\n for block in blocks:\n block_vars = {**block_vars, **block.vars}\n block_ops = [ele for block in blocks for ele in block.ops]\n for one_op in block_ops:\n op_info = OrderedDict()\n spf_res = _summary_model(block_vars, one_op)\n if spf_res is None:\n continue\n if type(spf_res) == str:\n unsupported.add(one_op.type)\n continue\n op_info['type'] = one_op.type\n op_info['input_shape'] = spf_res[0][1:]\n op_info['out_shape'] = spf_res[1][1:]\n op_info['PARAMs'] = spf_res[2]\n op_info['FLOPs'] = spf_res[3]\n collected_ops_list.append(op_info)\n summary_table, total = _format_summary(collected_ops_list, batch_size,\n bits_per_tensor)\n _print_summary(summary_table, total, unsupported)\n\n\n<function token>\n\n\ndef _format_summary(collected_ops_list, batch_size, bits_per_tensor):\n \"\"\"\n Format summary report.\n Args:\n collected_ops_list: the collected operator with summary\n Returns:\n summary_table: summary report format\n total: sum param and flops\n \"\"\"\n summary_table = PrettyTable(['No.', 'TYPE', 'INPUT', 'OUTPUT', 'PARAMs',\n 'FLOPs'])\n summary_table.align = 'r'\n total = {}\n total_params = []\n total_flops = []\n total_outshape = []\n for i, one_op in enumerate(collected_ops_list):\n table_row = [i, one_op['type'], one_op['input_shape'], one_op[\n 'out_shape'], int(one_op['PARAMs']), int(one_op['FLOPs'])]\n if i == 0:\n input_shape = one_op['input_shape']\n summary_table.add_row(table_row)\n total_params.append(int(one_op['PARAMs']))\n total_flops.append(int(one_op['FLOPs']))\n total_outshape.append(one_op['out_shape'])\n total['params'] = total_params\n total['flops'] = total_flops\n total['out'] = total_outshape\n total['gpu'] = cal_gpu_memory(total, input_shape, batch_size,\n bits_per_tensor)\n return summary_table, total\n\n\ndef cal_gpu_memory(total, input_shape, batch_size, bits_per_tensor):\n gpu_input = prod(input_shape)\n gpu_param = total['params']\n gpu_backward_forward = sum([prod(ele) for ele in total['out']])\n gpu = (gpu_input + gpu_param + gpu_backward_forward) * (batch_size *\n bits_per_tensor / 8)\n return gpu\n\n\ndef _print_summary(summary_table, total, unsupported):\n \"\"\"\n Print all the summary on terminal.\n Args:\n summary_table: summary report format\n total: sum param and flops\n \"\"\"\n parmas = total['params']\n flops = total['flops']\n gpu = total['gpu']\n print(\n \"\"\"Notice: \n now supported ops include [Conv, DepthwiseConv, FC(mul), BatchNorm, Pool, Activation(sigmoid, tanh, relu, leaky_relu, prelu), Elementwise operations]\"\"\"\n )\n print('Unsupported operator types:', unsupported)\n print(summary_table)\n print('Total PARAMs: {}({:.4f}M)'.format(sum(parmas), sum(parmas) / 10 **\n 6))\n print('Total FLOPs: {}({:.2f}G)'.format(sum(flops), sum(flops) / 10 ** 9))\n print('GPU Memory Usage: {}({:.2f}GB)'.format(sum(gpu), sum(gpu) / 10 ** 9)\n )\n",
"<docstring token>\n<import token>\n\n\ndef summary(main_prog, batch_size, bits_per_tensor):\n \"\"\"\n It can summary model's PARAMS, FLOPs until now.\n It support common operator like conv, fc, pool, relu, sigmoid, bn etc. \n Args:\n main_prog: main program \n Returns:\n print summary on terminal\n \"\"\"\n blocks = main_prog.blocks\n collected_ops_list = []\n unsupported = set()\n block_vars = {}\n for block in blocks:\n block_vars = {**block_vars, **block.vars}\n block_ops = [ele for block in blocks for ele in block.ops]\n for one_op in block_ops:\n op_info = OrderedDict()\n spf_res = _summary_model(block_vars, one_op)\n if spf_res is None:\n continue\n if type(spf_res) == str:\n unsupported.add(one_op.type)\n continue\n op_info['type'] = one_op.type\n op_info['input_shape'] = spf_res[0][1:]\n op_info['out_shape'] = spf_res[1][1:]\n op_info['PARAMs'] = spf_res[2]\n op_info['FLOPs'] = spf_res[3]\n collected_ops_list.append(op_info)\n summary_table, total = _format_summary(collected_ops_list, batch_size,\n bits_per_tensor)\n _print_summary(summary_table, total, unsupported)\n\n\n<function token>\n<function token>\n\n\ndef cal_gpu_memory(total, input_shape, batch_size, bits_per_tensor):\n gpu_input = prod(input_shape)\n gpu_param = total['params']\n gpu_backward_forward = sum([prod(ele) for ele in total['out']])\n gpu = (gpu_input + gpu_param + gpu_backward_forward) * (batch_size *\n bits_per_tensor / 8)\n return gpu\n\n\ndef _print_summary(summary_table, total, unsupported):\n \"\"\"\n Print all the summary on terminal.\n Args:\n summary_table: summary report format\n total: sum param and flops\n \"\"\"\n parmas = total['params']\n flops = total['flops']\n gpu = total['gpu']\n print(\n \"\"\"Notice: \n now supported ops include [Conv, DepthwiseConv, FC(mul), BatchNorm, Pool, Activation(sigmoid, tanh, relu, leaky_relu, prelu), Elementwise operations]\"\"\"\n )\n print('Unsupported operator types:', unsupported)\n print(summary_table)\n print('Total PARAMs: {}({:.4f}M)'.format(sum(parmas), sum(parmas) / 10 **\n 6))\n print('Total FLOPs: {}({:.2f}G)'.format(sum(flops), sum(flops) / 10 ** 9))\n print('GPU Memory Usage: {}({:.2f}GB)'.format(sum(gpu), sum(gpu) / 10 ** 9)\n )\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef cal_gpu_memory(total, input_shape, batch_size, bits_per_tensor):\n gpu_input = prod(input_shape)\n gpu_param = total['params']\n gpu_backward_forward = sum([prod(ele) for ele in total['out']])\n gpu = (gpu_input + gpu_param + gpu_backward_forward) * (batch_size *\n bits_per_tensor / 8)\n return gpu\n\n\ndef _print_summary(summary_table, total, unsupported):\n \"\"\"\n Print all the summary on terminal.\n Args:\n summary_table: summary report format\n total: sum param and flops\n \"\"\"\n parmas = total['params']\n flops = total['flops']\n gpu = total['gpu']\n print(\n \"\"\"Notice: \n now supported ops include [Conv, DepthwiseConv, FC(mul), BatchNorm, Pool, Activation(sigmoid, tanh, relu, leaky_relu, prelu), Elementwise operations]\"\"\"\n )\n print('Unsupported operator types:', unsupported)\n print(summary_table)\n print('Total PARAMs: {}({:.4f}M)'.format(sum(parmas), sum(parmas) / 10 **\n 6))\n print('Total FLOPs: {}({:.2f}G)'.format(sum(flops), sum(flops) / 10 ** 9))\n print('GPU Memory Usage: {}({:.2f}GB)'.format(sum(gpu), sum(gpu) / 10 ** 9)\n )\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef cal_gpu_memory(total, input_shape, batch_size, bits_per_tensor):\n gpu_input = prod(input_shape)\n gpu_param = total['params']\n gpu_backward_forward = sum([prod(ele) for ele in total['out']])\n gpu = (gpu_input + gpu_param + gpu_backward_forward) * (batch_size *\n bits_per_tensor / 8)\n return gpu\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
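A worked check of the conv FLOPs rule used by `_summary_model` in the row above (a standalone sketch; the 3x3, 64 -> 128 channel, 56x56-output layer is invented for illustration):

def conv2d_flops(c_in, c_out, k_h, k_w, h_out, w_out, groups=1, bias=True):
    # multiply-accumulates per output element, doubled to count mults and adds,
    # matching flops = 2 * h_out * w_out * c_out * (kernel_ops + bias_ops) above
    kernel_ops = k_h * k_w * (c_in / groups)
    bias_ops = 1 if bias else 0
    return 2 * h_out * w_out * c_out * (kernel_ops + bias_ops)

# e.g. conv2d_flops(64, 128, 3, 3, 56, 56) == 2 * 56 * 56 * 128 * (3 * 3 * 64 + 1)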
99,552 |
0d1b1ad9fc12e5d78ea5f8ee838e549def01b3f0
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 2 17:31:33 2018
@author: Wahba
"""
print('Enter your interval')
bgn = int(input())
end = int(input())
armstrong_numbers = []
for i in range(bgn, end + 1):
    digits = str(i)
    digit_sum = 0  # avoid shadowing the built-in sum()
    for j in range(len(digits)):
        digit_sum += int(digits[j]) ** len(digits)
    if digit_sum == i:
        armstrong_numbers.append(i)
print(armstrong_numbers)
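
# Hedged alternative: the same check as a reusable predicate, written with a
# generator expression instead of the explicit inner loop.
def is_armstrong(n):
    digits = str(n)
    return n == sum(int(d) ** len(digits) for d in digits)

# e.g. [x for x in range(100, 1000) if is_armstrong(x)] -> [153, 370, 371, 407]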
|
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 2 17:31:33 2018\n\n@author: Wahba\n\"\"\"\n\nprint('Enter your interval')\nbgn=int(input())\nend=int(input())\nArmsttrong_arr = []\nfor i in range(bgn,end+1):\n stri = str(i)\n sum = 0\n for j in range (len(stri)):\n sum=sum+int(stri[j])**len(stri)\n if sum == i :\n Armsttrong_arr.append(i)\nprint( Armsttrong_arr)",
"<docstring token>\nprint('Enter your interval')\nbgn = int(input())\nend = int(input())\nArmsttrong_arr = []\nfor i in range(bgn, end + 1):\n stri = str(i)\n sum = 0\n for j in range(len(stri)):\n sum = sum + int(stri[j]) ** len(stri)\n if sum == i:\n Armsttrong_arr.append(i)\nprint(Armsttrong_arr)\n",
"<docstring token>\nprint('Enter your interval')\n<assignment token>\nfor i in range(bgn, end + 1):\n stri = str(i)\n sum = 0\n for j in range(len(stri)):\n sum = sum + int(stri[j]) ** len(stri)\n if sum == i:\n Armsttrong_arr.append(i)\nprint(Armsttrong_arr)\n",
"<docstring token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,553 |
8db04f5ca90683c5c7eb2aff9b7b82c03d9c6110
|
# -*- coding: utf-8 -*-
import unittest
from mahjong.ai.agari import Agari
from utils.tests import TestMixin
class AgariTestCase(unittest.TestCase, TestMixin):
def test_is_agari(self):
agari = Agari()
tiles = self._string_to_136_array(sou='123456789', pin='123', man='33')
self.assertTrue(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(sou='123456789', pin='11123')
self.assertTrue(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(sou='123456789', honors='11777')
self.assertTrue(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(sou='12345556778899')
self.assertTrue(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(sou='11123456788999')
self.assertTrue(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(sou='233334', pin='789', man='345', honors='55')
self.assertTrue(agari.is_agari(self._to_34_array(tiles)))
def test_is_not_agari(self):
agari = Agari()
tiles = self._string_to_136_array(sou='123456789', pin='12345')
self.assertFalse(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(sou='111222444', pin='11145')
self.assertFalse(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(sou='11122233356888')
self.assertFalse(agari.is_agari(self._to_34_array(tiles)))
def test_is_chitoitsu_agari(self):
agari = Agari()
tiles = self._string_to_136_array(sou='1133557799', pin='1199')
self.assertTrue(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(sou='2244', pin='1199', man='11', honors='2277')
self.assertTrue(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(man='11223344556677')
self.assertTrue(agari.is_agari(self._to_34_array(tiles)))
def test_is_kokushi_musou_agari(self):
agari = Agari()
tiles = self._string_to_136_array(sou='19', pin='19', man='199', honors='1234567')
self.assertTrue(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(sou='19', pin='19', man='19', honors='11234567')
self.assertTrue(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(sou='19', pin='19', man='19', honors='12345677')
self.assertTrue(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(sou='129', pin='19', man='19', honors='1234567')
self.assertFalse(agari.is_agari(self._to_34_array(tiles)))
tiles = self._string_to_136_array(sou='19', pin='19', man='19', honors='11134567')
self.assertFalse(agari.is_agari(self._to_34_array(tiles)))
def test_is_agari_and_open_hand(self):
agari = Agari()
tiles = self._string_to_136_array(sou='23455567', pin='222', man='345')
open_sets = [self._string_to_open_34_set(man='345'), self._string_to_open_34_set(sou='555')]
self.assertFalse(agari.is_agari(self._to_34_array(tiles), open_sets))
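
# Hedged illustration (not part of the library under test): the `_to_34_array`
# helper used above conceptually collapses 136-format tile ids (four copies of
# each of the 34 tile kinds) into a 34-slot histogram:
def to_34_array_sketch(tiles_136):
    counts = [0] * 34
    for tile in tiles_136:
        counts[tile // 4] += 1  # assumed 136 -> 34 mapping: integer-divide by 4
    return counts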
|
[
"# -*- coding: utf-8 -*-\nimport unittest\n\nfrom mahjong.ai.agari import Agari\nfrom utils.tests import TestMixin\n\n\nclass AgariTestCase(unittest.TestCase, TestMixin):\n\n def test_is_agari(self):\n agari = Agari()\n\n tiles = self._string_to_136_array(sou='123456789', pin='123', man='33')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(sou='123456789', pin='11123')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(sou='123456789', honors='11777')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(sou='12345556778899')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(sou='11123456788999')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(sou='233334', pin='789', man='345', honors='55')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_not_agari(self):\n agari = Agari()\n\n tiles = self._string_to_136_array(sou='123456789', pin='12345')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(sou='111222444', pin='11145')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(sou='11122233356888')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_chitoitsu_agari(self):\n agari = Agari()\n\n tiles = self._string_to_136_array(sou='1133557799', pin='1199')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(sou='2244', pin='1199', man='11', honors='2277')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(man='11223344556677')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_kokushi_musou_agari(self):\n agari = Agari()\n\n tiles = self._string_to_136_array(sou='19', pin='19', man='199', honors='1234567')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(sou='19', pin='19', man='19', honors='11234567')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(sou='19', pin='19', man='19', honors='12345677')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(sou='129', pin='19', man='19', honors='1234567')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n\n tiles = self._string_to_136_array(sou='19', pin='19', man='19', honors='11134567')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_agari_and_open_hand(self):\n agari = Agari()\n\n tiles = self._string_to_136_array(sou='23455567', pin='222', man='345')\n open_sets = [self._string_to_open_34_set(man='345'), self._string_to_open_34_set(sou='555')]\n self.assertFalse(agari.is_agari(self._to_34_array(tiles), open_sets))\n",
"import unittest\nfrom mahjong.ai.agari import Agari\nfrom utils.tests import TestMixin\n\n\nclass AgariTestCase(unittest.TestCase, TestMixin):\n\n def test_is_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='123456789', pin='123', man='33')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='123456789', pin='11123')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='123456789', honors='11777')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='12345556778899')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='11123456788999')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='233334', pin='789', man=\n '345', honors='55')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_not_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='123456789', pin='12345')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='111222444', pin='11145')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='11122233356888')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_chitoitsu_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='1133557799', pin='1199')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='2244', pin='1199', man='11',\n honors='2277')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(man='11223344556677')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_kokushi_musou_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='19', pin='19', man='199',\n honors='1234567')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='19', pin='19', man='19',\n honors='11234567')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='19', pin='19', man='19',\n honors='12345677')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='129', pin='19', man='19',\n honors='1234567')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='19', pin='19', man='19',\n honors='11134567')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_agari_and_open_hand(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='23455567', pin='222', man='345')\n open_sets = [self._string_to_open_34_set(man='345'), self.\n _string_to_open_34_set(sou='555')]\n self.assertFalse(agari.is_agari(self._to_34_array(tiles), open_sets))\n",
"<import token>\n\n\nclass AgariTestCase(unittest.TestCase, TestMixin):\n\n def test_is_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='123456789', pin='123', man='33')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='123456789', pin='11123')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='123456789', honors='11777')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='12345556778899')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='11123456788999')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='233334', pin='789', man=\n '345', honors='55')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_not_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='123456789', pin='12345')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='111222444', pin='11145')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='11122233356888')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_chitoitsu_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='1133557799', pin='1199')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='2244', pin='1199', man='11',\n honors='2277')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(man='11223344556677')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_kokushi_musou_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='19', pin='19', man='199',\n honors='1234567')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='19', pin='19', man='19',\n honors='11234567')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='19', pin='19', man='19',\n honors='12345677')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='129', pin='19', man='19',\n honors='1234567')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='19', pin='19', man='19',\n honors='11134567')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_agari_and_open_hand(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='23455567', pin='222', man='345')\n open_sets = [self._string_to_open_34_set(man='345'), self.\n _string_to_open_34_set(sou='555')]\n self.assertFalse(agari.is_agari(self._to_34_array(tiles), open_sets))\n",
"<import token>\n\n\nclass AgariTestCase(unittest.TestCase, TestMixin):\n\n def test_is_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='123456789', pin='123', man='33')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='123456789', pin='11123')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='123456789', honors='11777')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='12345556778899')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='11123456788999')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='233334', pin='789', man=\n '345', honors='55')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_not_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='123456789', pin='12345')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='111222444', pin='11145')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='11122233356888')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_chitoitsu_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='1133557799', pin='1199')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='2244', pin='1199', man='11',\n honors='2277')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(man='11223344556677')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_kokushi_musou_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='19', pin='19', man='199',\n honors='1234567')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='19', pin='19', man='19',\n honors='11234567')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='19', pin='19', man='19',\n honors='12345677')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='129', pin='19', man='19',\n honors='1234567')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='19', pin='19', man='19',\n honors='11134567')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n <function token>\n",
"<import token>\n\n\nclass AgariTestCase(unittest.TestCase, TestMixin):\n\n def test_is_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='123456789', pin='123', man='33')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='123456789', pin='11123')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='123456789', honors='11777')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='12345556778899')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='11123456788999')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='233334', pin='789', man=\n '345', honors='55')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_not_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='123456789', pin='12345')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='111222444', pin='11145')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='11122233356888')\n self.assertFalse(agari.is_agari(self._to_34_array(tiles)))\n\n def test_is_chitoitsu_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='1133557799', pin='1199')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='2244', pin='1199', man='11',\n honors='2277')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(man='11223344556677')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n <function token>\n <function token>\n",
"<import token>\n\n\nclass AgariTestCase(unittest.TestCase, TestMixin):\n\n def test_is_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='123456789', pin='123', man='33')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='123456789', pin='11123')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='123456789', honors='11777')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='12345556778899')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='11123456788999')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='233334', pin='789', man=\n '345', honors='55')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n <function token>\n\n def test_is_chitoitsu_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='1133557799', pin='1199')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='2244', pin='1199', man='11',\n honors='2277')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(man='11223344556677')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n <function token>\n <function token>\n",
"<import token>\n\n\nclass AgariTestCase(unittest.TestCase, TestMixin):\n <function token>\n <function token>\n\n def test_is_chitoitsu_agari(self):\n agari = Agari()\n tiles = self._string_to_136_array(sou='1133557799', pin='1199')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(sou='2244', pin='1199', man='11',\n honors='2277')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n tiles = self._string_to_136_array(man='11223344556677')\n self.assertTrue(agari.is_agari(self._to_34_array(tiles)))\n <function token>\n <function token>\n",
"<import token>\n\n\nclass AgariTestCase(unittest.TestCase, TestMixin):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,554 |
7886ca06f8d960a6f601c10cce5a1901f2ad28be
|
from sklearn.datasets import load_files
from keras.utils import np_utils
import numpy as np
from glob import glob
def load_dataset(path):
data = load_files(path)
ship_files = np.array(data['filenames'])
ship_targets = np_utils.to_categorical(np.array(data['target']), 133)
return ship_files, ship_targets
test_files, test_targets = load_dataset('shipImages/test')
ship_names = [item[20:-1] for item in sorted(glob("shipImages/train/*/"))]
from keras.preprocessing import image
from tqdm import tqdm
def path_to_tensor(img_path):
# loads RGB image as PIL.Image.Image type
img = image.load_img(img_path, target_size=(224, 224))
# convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)
x = image.img_to_array(img)
# convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor
return np.expand_dims(x, axis=0)
def paths_to_tensor(img_paths):
list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)]
return np.vstack(list_of_tensors)
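
# Shape note (hedged): for N image paths this returns an (N, 224, 224, 3)
# float batch, e.g. paths_to_tensor(test_files[:2]).shape == (2, 224, 224, 3).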
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# pre-process the data for Keras
test_tensors = paths_to_tensor(test_files).astype('float32')/255
from keras.applications.resnet50 import ResNet50
from keras.applications.resnet50 import preprocess_input as preprocess_input_resnet50
def extract_Resnet50(file_paths):
tensors = paths_to_tensor(file_paths).astype('float32')
preprocessed_input = preprocess_input_resnet50(tensors)
return ResNet50(weights='imagenet', include_top=False).predict(preprocessed_input, batch_size=32)
# ## Extract feature
test_resnet50 = extract_Resnet50(test_files)
print("Resnet50 shape", test_resnet50.shape[1:])
# ## Retrain the last layers for our data
from keras.layers.pooling import GlobalAveragePooling2D
from keras.layers.merge import Concatenate
from keras.layers import Input, Dense
from keras.layers.core import Dropout, Activation
from keras.callbacks import ModelCheckpoint
from keras.layers.normalization import BatchNormalization
from keras.models import Model
def input_branch(input_shape=None):
size = int(input_shape[2] / 4)
branch_input = Input(shape=input_shape)
branch = GlobalAveragePooling2D()(branch_input)
branch = Dense(size, use_bias=False, kernel_initializer='uniform')(branch)
branch = BatchNormalization()(branch)
branch = Activation("relu")(branch)
return branch, branch_input
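
# Design note (hedged): with input_shape=(1, 1, 2048) the bottleneck is
# size = 2048 // 4 = 512 units; pooling first flattens the 1x1x2048 ResNet50
# feature map into a 2048-vector before the dense compression.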
resnet50_branch, resnet50_input = input_branch(input_shape=(1, 1, 2048))
net = Dropout(0.3)(resnet50_branch)
net = Dense(640, use_bias=False, kernel_initializer='uniform')(net)
net = BatchNormalization()(net)
net = Activation("relu")(net)
net = Dropout(0.3)(net)
net = Dense(133, kernel_initializer='uniform', activation="softmax")(net)
model = Model(inputs=[resnet50_input], outputs=[net])
model.summary()
# ## Test the model
model.load_weights('ship_models/bestmodel.hdf5')
from sklearn.metrics import accuracy_score
predictions = model.predict([test_resnet50])
class_predictions = [np.argmax(prediction) for prediction in predictions]
class_true_labels = [np.argmax(true_label) for true_label in test_targets]
print('Test accuracy: %.4f%%' % (accuracy_score(class_true_labels, class_predictions) * 100))
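
# Hedged addition: a per-class breakdown usually shows *which* ship types are
# being confused, complementing the single accuracy number above.
from sklearn.metrics import classification_report
print(classification_report(class_true_labels, class_predictions))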
import shutil
import pathlib
import cv2
import os
def save_test_results(test_files, true_path, false_path):
    # optionally wipe previous results first:
    # shutil.rmtree(true_path); shutil.rmtree(false_path)
    pathlib.Path(true_path).mkdir(parents=True, exist_ok=True)
    pathlib.Path(false_path).mkdir(parents=True, exist_ok=True)
    class_encoding = {0: "Fishing", 1: "Cargo", 2: "Tanker"}
    for i, img in tqdm(enumerate(test_files)):
        try:
            imname = img.split('/')[-1]
            im = cv2.imread(img)
            cv2.putText(im, "Prediction: {} True: {}".format(class_encoding[class_predictions[i]], class_encoding[class_true_labels[i]]),
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
            if class_predictions[i] == class_true_labels[i]:
                cv2.imwrite(os.path.join(true_path, imname), im)
            else:
                cv2.imwrite(os.path.join(false_path, imname), im)
        except Exception:
            # skip unreadable or missing image files instead of aborting the run
            pass
save_test_results(test_files, 'res_true', 'res_false')
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig('confusion_matrix.png')
class_names = ["Fishing", "Cargo", "Tanker"]
#class_names = np.unique(class_predictions)
cnf_matrix = confusion_matrix(class_true_labels, class_predictions)
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
|
[
"from sklearn.datasets import load_files \nfrom keras.utils import np_utils\nimport numpy as np\nfrom glob import glob\n\ndef load_dataset(path):\n data = load_files(path)\n ship_files = np.array(data['filenames'])\n ship_targets = np_utils.to_categorical(np.array(data['target']), 133)\n return ship_files, ship_targets\n\ntest_files, test_targets = load_dataset('shipImages/test')\nship_names = [item[20:-1] for item in sorted(glob(\"shipImages/train/*/\"))]\n\nfrom keras.preprocessing import image \nfrom tqdm import tqdm\n\ndef path_to_tensor(img_path):\n # loads RGB image as PIL.Image.Image type\n img = image.load_img(img_path, target_size=(224, 224))\n # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)\n x = image.img_to_array(img)\n # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor\n return np.expand_dims(x, axis=0)\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)]\n return np.vstack(list_of_tensors)\n\nfrom PIL import ImageFile \nImageFile.LOAD_TRUNCATED_IMAGES = True \n\n# pre-process the data for Keras\ntest_tensors = paths_to_tensor(test_files).astype('float32')/255\n\n###########################\n# from keras.applications.resnet50 import ResNet50\n# from keras.applications.resnet50 import preprocess_input as preprocess_input_resnet50\n\n# def extract_Resnet50(file_paths):\n# tensors = paths_to_tensor(file_paths).astype('float32')\n# preprocessed_input = preprocess_input_resnet50(tensors)\n# return ResNet50(weights='imagenet', include_top=False).predict(preprocessed_input, batch_size=32)\n\n# test_resnet50 = extract_Resnet50(test_files)\n# print(\"Resnet50 shape\", test_resnet50.shape[1:])\n\n# from keras.layers.pooling import GlobalAveragePooling2D\n# from keras.layers.merge import Concatenate\n# from keras.layers import Input, Dense\n# from keras.layers.core import Dropout, Activation\n# from keras.callbacks import ModelCheckpoint\n# from keras.layers.normalization import BatchNormalization\n# from keras.models import Model\n\n# def input_branch(input_shape=None):\n \n# size = int(input_shape[2] / 4)\n \n# branch_input = Input(shape=input_shape)\n# branch = GlobalAveragePooling2D()(branch_input)\n# branch = Dense(size, use_bias=False, kernel_initializer='uniform')(branch)\n# branch = BatchNormalization()(branch)\n# branch = Activation(\"relu\")(branch)\n# return branch, branch_input\n\n# resnet50_branch, resnet50_input = input_branch(input_shape=(1, 1, 2048))\n# net = Dropout(0.3)(resnet50_branch)\n# net = Dense(640, use_bias=False, kernel_initializer='uniform')(net)\n# net = BatchNormalization()(net)\n# net = Activation(\"relu\")(net)\n# net = Dropout(0.3)(net)\n# net = Dense(133, kernel_initializer='uniform', activation=\"softmax\")(net)\n\n# model = Model(inputs=[resnet50_input], outputs=[net])\n# model.summary()\n\n# model.compile(loss='categorical_crossentropy', optimizer=\"rmsprop\", metrics=['accuracy'])\n# model.load_weights('ship_models/bestmodel.hdf5')\n\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.applications.resnet50 import preprocess_input as preprocess_input_resnet50\n\ndef extract_Resnet50(file_paths):\n tensors = paths_to_tensor(file_paths).astype('float32')\n preprocessed_input = preprocess_input_resnet50(tensors)\n return ResNet50(weights='imagenet', include_top=False).predict(preprocessed_input, batch_size=32)\n\n# ## Extract feature\ntest_resnet50 = extract_Resnet50(test_files)\nprint(\"Resnet50 shape\", 
test_resnet50.shape[1:])\n\n# ## Retrain the last layers for our data\nfrom keras.layers.pooling import GlobalAveragePooling2D\nfrom keras.layers.merge import Concatenate\nfrom keras.layers import Input, Dense\nfrom keras.layers.core import Dropout, Activation\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\n\ndef input_branch(input_shape=None):\n \n size = int(input_shape[2] / 4)\n \n branch_input = Input(shape=input_shape)\n branch = GlobalAveragePooling2D()(branch_input)\n branch = Dense(size, use_bias=False, kernel_initializer='uniform')(branch)\n branch = BatchNormalization()(branch)\n branch = Activation(\"relu\")(branch)\n return branch, branch_input\n\nresnet50_branch, resnet50_input = input_branch(input_shape=(1, 1, 2048))\nnet = Dropout(0.3)(resnet50_branch)\nnet = Dense(640, use_bias=False, kernel_initializer='uniform')(net)\nnet = BatchNormalization()(net)\nnet = Activation(\"relu\")(net)\nnet = Dropout(0.3)(net)\nnet = Dense(133, kernel_initializer='uniform', activation=\"softmax\")(net)\n\nmodel = Model(inputs=[resnet50_input], outputs=[net])\nmodel.summary()\n\n# ## Test the model\nmodel.load_weights('ship_models/bestmodel.hdf5')\n\nfrom sklearn.metrics import accuracy_score\n\npredictions = model.predict([test_resnet50])\nclass_predictions = [np.argmax(prediction) for prediction in predictions]\nclass_true_labels = [np.argmax(true_label) for true_label in test_targets]\nprint('Test accuracy: %.4f%%' % (accuracy_score(class_true_labels, class_predictions) * 100))\n\nimport shutil\nimport pathlib\nimport cv2\nimport os\n\ndef save_test_results(test_files, true_path, false_path):\n # shutil.rmtree(true_path)\n # shutil.rmtree(false_path)\n pathlib.Path(true_path).mkdir(parents=True, exist_ok=True)\n pathlib.Path(false_path).mkdir(parents=True, exist_ok=True)\n class_encoding = {0: \"Fishing\", 1: \"Cargo\", 2: \"Tanker\"}\n for i, img in tqdm(enumerate(test_files)):\n try:\n imname = img.split('/')[-1]\n im = cv2.imread(img)\n cv2.putText(im, \"Prediction: {} True: {}\".format(class_encoding[class_predictions[i]], class_encoding[class_true_labels[i]]),\n (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)\n cv2.imwrite(os.path.join(true_path, imname), im) if class_predictions[i]==class_true_labels[i] else cv2.imwrite(os.path.join(false_path, imname), im)\n except:\n pass\n\nsave_test_results(test_files, 'res_true', 'res_false')\n\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport itertools\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n 
plt.xlabel('Predicted label')\n plt.savefig('confusion_matrix.png')\n\nclass_names = [\"Fishing\", \"Cargo\", \"Tanker\"]\n#class_names = np.unique(class_predictions)\ncnf_matrix = confusion_matrix(class_true_labels, class_predictions) \nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\n",
"from sklearn.datasets import load_files\nfrom keras.utils import np_utils\nimport numpy as np\nfrom glob import glob\n\n\ndef load_dataset(path):\n data = load_files(path)\n ship_files = np.array(data['filenames'])\n ship_targets = np_utils.to_categorical(np.array(data['target']), 133)\n return ship_files, ship_targets\n\n\ntest_files, test_targets = load_dataset('shipImages/test')\nship_names = [item[20:-1] for item in sorted(glob('shipImages/train/*/'))]\nfrom keras.preprocessing import image\nfrom tqdm import tqdm\n\n\ndef path_to_tensor(img_path):\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\n return np.expand_dims(x, axis=0)\n\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)\n ]\n return np.vstack(list_of_tensors)\n\n\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\ntest_tensors = paths_to_tensor(test_files).astype('float32') / 255\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.applications.resnet50 import preprocess_input as preprocess_input_resnet50\n\n\ndef extract_Resnet50(file_paths):\n tensors = paths_to_tensor(file_paths).astype('float32')\n preprocessed_input = preprocess_input_resnet50(tensors)\n return ResNet50(weights='imagenet', include_top=False).predict(\n preprocessed_input, batch_size=32)\n\n\ntest_resnet50 = extract_Resnet50(test_files)\nprint('Resnet50 shape', test_resnet50.shape[1:])\nfrom keras.layers.pooling import GlobalAveragePooling2D\nfrom keras.layers.merge import Concatenate\nfrom keras.layers import Input, Dense\nfrom keras.layers.core import Dropout, Activation\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\n\n\ndef input_branch(input_shape=None):\n size = int(input_shape[2] / 4)\n branch_input = Input(shape=input_shape)\n branch = GlobalAveragePooling2D()(branch_input)\n branch = Dense(size, use_bias=False, kernel_initializer='uniform')(branch)\n branch = BatchNormalization()(branch)\n branch = Activation('relu')(branch)\n return branch, branch_input\n\n\nresnet50_branch, resnet50_input = input_branch(input_shape=(1, 1, 2048))\nnet = Dropout(0.3)(resnet50_branch)\nnet = Dense(640, use_bias=False, kernel_initializer='uniform')(net)\nnet = BatchNormalization()(net)\nnet = Activation('relu')(net)\nnet = Dropout(0.3)(net)\nnet = Dense(133, kernel_initializer='uniform', activation='softmax')(net)\nmodel = Model(inputs=[resnet50_input], outputs=[net])\nmodel.summary()\nmodel.load_weights('ship_models/bestmodel.hdf5')\nfrom sklearn.metrics import accuracy_score\npredictions = model.predict([test_resnet50])\nclass_predictions = [np.argmax(prediction) for prediction in predictions]\nclass_true_labels = [np.argmax(true_label) for true_label in test_targets]\nprint('Test accuracy: %.4f%%' % (accuracy_score(class_true_labels,\n class_predictions) * 100))\nimport shutil\nimport pathlib\nimport cv2\nimport os\n\n\ndef save_test_results(test_files, true_path, false_path):\n pathlib.Path(true_path).mkdir(parents=True, exist_ok=True)\n pathlib.Path(false_path).mkdir(parents=True, exist_ok=True)\n class_encoding = {(0): 'Fishing', (1): 'Cargo', (2): 'Tanker'}\n for i, img in tqdm(enumerate(test_files)):\n try:\n imname = img.split('/')[-1]\n im = cv2.imread(img)\n cv2.putText(im, 'Prediction: {} True: {}'.format(class_encoding\n [class_predictions[i]], class_encoding[class_true_labels[i]\n ]), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 
2)\n cv2.imwrite(os.path.join(true_path, imname), im\n ) if class_predictions[i] == class_true_labels[i\n ] else cv2.imwrite(os.path.join(false_path, imname), im)\n except:\n pass\n\n\nsave_test_results(test_files, 'res_true', 'res_false')\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport itertools\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title=\n 'Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',\n color='white' if cm[i, j] > thresh else 'black')\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('confusion_matrix.png')\n\n\nclass_names = ['Fishing', 'Cargo', 'Tanker']\ncnf_matrix = confusion_matrix(class_true_labels, class_predictions)\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\n",
"<import token>\n\n\ndef load_dataset(path):\n data = load_files(path)\n ship_files = np.array(data['filenames'])\n ship_targets = np_utils.to_categorical(np.array(data['target']), 133)\n return ship_files, ship_targets\n\n\ntest_files, test_targets = load_dataset('shipImages/test')\nship_names = [item[20:-1] for item in sorted(glob('shipImages/train/*/'))]\n<import token>\n\n\ndef path_to_tensor(img_path):\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\n return np.expand_dims(x, axis=0)\n\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)\n ]\n return np.vstack(list_of_tensors)\n\n\n<import token>\nImageFile.LOAD_TRUNCATED_IMAGES = True\ntest_tensors = paths_to_tensor(test_files).astype('float32') / 255\n<import token>\n\n\ndef extract_Resnet50(file_paths):\n tensors = paths_to_tensor(file_paths).astype('float32')\n preprocessed_input = preprocess_input_resnet50(tensors)\n return ResNet50(weights='imagenet', include_top=False).predict(\n preprocessed_input, batch_size=32)\n\n\ntest_resnet50 = extract_Resnet50(test_files)\nprint('Resnet50 shape', test_resnet50.shape[1:])\n<import token>\n\n\ndef input_branch(input_shape=None):\n size = int(input_shape[2] / 4)\n branch_input = Input(shape=input_shape)\n branch = GlobalAveragePooling2D()(branch_input)\n branch = Dense(size, use_bias=False, kernel_initializer='uniform')(branch)\n branch = BatchNormalization()(branch)\n branch = Activation('relu')(branch)\n return branch, branch_input\n\n\nresnet50_branch, resnet50_input = input_branch(input_shape=(1, 1, 2048))\nnet = Dropout(0.3)(resnet50_branch)\nnet = Dense(640, use_bias=False, kernel_initializer='uniform')(net)\nnet = BatchNormalization()(net)\nnet = Activation('relu')(net)\nnet = Dropout(0.3)(net)\nnet = Dense(133, kernel_initializer='uniform', activation='softmax')(net)\nmodel = Model(inputs=[resnet50_input], outputs=[net])\nmodel.summary()\nmodel.load_weights('ship_models/bestmodel.hdf5')\n<import token>\npredictions = model.predict([test_resnet50])\nclass_predictions = [np.argmax(prediction) for prediction in predictions]\nclass_true_labels = [np.argmax(true_label) for true_label in test_targets]\nprint('Test accuracy: %.4f%%' % (accuracy_score(class_true_labels,\n class_predictions) * 100))\n<import token>\n\n\ndef save_test_results(test_files, true_path, false_path):\n pathlib.Path(true_path).mkdir(parents=True, exist_ok=True)\n pathlib.Path(false_path).mkdir(parents=True, exist_ok=True)\n class_encoding = {(0): 'Fishing', (1): 'Cargo', (2): 'Tanker'}\n for i, img in tqdm(enumerate(test_files)):\n try:\n imname = img.split('/')[-1]\n im = cv2.imread(img)\n cv2.putText(im, 'Prediction: {} True: {}'.format(class_encoding\n [class_predictions[i]], class_encoding[class_true_labels[i]\n ]), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)\n cv2.imwrite(os.path.join(true_path, imname), im\n ) if class_predictions[i] == class_true_labels[i\n ] else cv2.imwrite(os.path.join(false_path, imname), im)\n except:\n pass\n\n\nsave_test_results(test_files, 'res_true', 'res_false')\n<import token>\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title=\n 'Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without 
normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',\n color='white' if cm[i, j] > thresh else 'black')\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('confusion_matrix.png')\n\n\nclass_names = ['Fishing', 'Cargo', 'Tanker']\ncnf_matrix = confusion_matrix(class_true_labels, class_predictions)\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\n",
"<import token>\n\n\ndef load_dataset(path):\n data = load_files(path)\n ship_files = np.array(data['filenames'])\n ship_targets = np_utils.to_categorical(np.array(data['target']), 133)\n return ship_files, ship_targets\n\n\n<assignment token>\n<import token>\n\n\ndef path_to_tensor(img_path):\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\n return np.expand_dims(x, axis=0)\n\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)\n ]\n return np.vstack(list_of_tensors)\n\n\n<import token>\n<assignment token>\n<import token>\n\n\ndef extract_Resnet50(file_paths):\n tensors = paths_to_tensor(file_paths).astype('float32')\n preprocessed_input = preprocess_input_resnet50(tensors)\n return ResNet50(weights='imagenet', include_top=False).predict(\n preprocessed_input, batch_size=32)\n\n\n<assignment token>\nprint('Resnet50 shape', test_resnet50.shape[1:])\n<import token>\n\n\ndef input_branch(input_shape=None):\n size = int(input_shape[2] / 4)\n branch_input = Input(shape=input_shape)\n branch = GlobalAveragePooling2D()(branch_input)\n branch = Dense(size, use_bias=False, kernel_initializer='uniform')(branch)\n branch = BatchNormalization()(branch)\n branch = Activation('relu')(branch)\n return branch, branch_input\n\n\n<assignment token>\nmodel.summary()\nmodel.load_weights('ship_models/bestmodel.hdf5')\n<import token>\n<assignment token>\nprint('Test accuracy: %.4f%%' % (accuracy_score(class_true_labels,\n class_predictions) * 100))\n<import token>\n\n\ndef save_test_results(test_files, true_path, false_path):\n pathlib.Path(true_path).mkdir(parents=True, exist_ok=True)\n pathlib.Path(false_path).mkdir(parents=True, exist_ok=True)\n class_encoding = {(0): 'Fishing', (1): 'Cargo', (2): 'Tanker'}\n for i, img in tqdm(enumerate(test_files)):\n try:\n imname = img.split('/')[-1]\n im = cv2.imread(img)\n cv2.putText(im, 'Prediction: {} True: {}'.format(class_encoding\n [class_predictions[i]], class_encoding[class_true_labels[i]\n ]), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)\n cv2.imwrite(os.path.join(true_path, imname), im\n ) if class_predictions[i] == class_true_labels[i\n ] else cv2.imwrite(os.path.join(false_path, imname), im)\n except:\n pass\n\n\nsave_test_results(test_files, 'res_true', 'res_false')\n<import token>\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title=\n 'Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',\n color='white' if cm[i, j] > thresh else 'black')\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('confusion_matrix.png')\n\n\n<assignment token>\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\n",
"<import token>\n\n\ndef load_dataset(path):\n data = load_files(path)\n ship_files = np.array(data['filenames'])\n ship_targets = np_utils.to_categorical(np.array(data['target']), 133)\n return ship_files, ship_targets\n\n\n<assignment token>\n<import token>\n\n\ndef path_to_tensor(img_path):\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\n return np.expand_dims(x, axis=0)\n\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)\n ]\n return np.vstack(list_of_tensors)\n\n\n<import token>\n<assignment token>\n<import token>\n\n\ndef extract_Resnet50(file_paths):\n tensors = paths_to_tensor(file_paths).astype('float32')\n preprocessed_input = preprocess_input_resnet50(tensors)\n return ResNet50(weights='imagenet', include_top=False).predict(\n preprocessed_input, batch_size=32)\n\n\n<assignment token>\n<code token>\n<import token>\n\n\ndef input_branch(input_shape=None):\n size = int(input_shape[2] / 4)\n branch_input = Input(shape=input_shape)\n branch = GlobalAveragePooling2D()(branch_input)\n branch = Dense(size, use_bias=False, kernel_initializer='uniform')(branch)\n branch = BatchNormalization()(branch)\n branch = Activation('relu')(branch)\n return branch, branch_input\n\n\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<import token>\n\n\ndef save_test_results(test_files, true_path, false_path):\n pathlib.Path(true_path).mkdir(parents=True, exist_ok=True)\n pathlib.Path(false_path).mkdir(parents=True, exist_ok=True)\n class_encoding = {(0): 'Fishing', (1): 'Cargo', (2): 'Tanker'}\n for i, img in tqdm(enumerate(test_files)):\n try:\n imname = img.split('/')[-1]\n im = cv2.imread(img)\n cv2.putText(im, 'Prediction: {} True: {}'.format(class_encoding\n [class_predictions[i]], class_encoding[class_true_labels[i]\n ]), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)\n cv2.imwrite(os.path.join(true_path, imname), im\n ) if class_predictions[i] == class_true_labels[i\n ] else cv2.imwrite(os.path.join(false_path, imname), im)\n except:\n pass\n\n\n<code token>\n<import token>\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title=\n 'Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',\n color='white' if cm[i, j] > thresh else 'black')\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('confusion_matrix.png')\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\ndef load_dataset(path):\n data = load_files(path)\n ship_files = np.array(data['filenames'])\n ship_targets = np_utils.to_categorical(np.array(data['target']), 133)\n return ship_files, ship_targets\n\n\n<assignment token>\n<import token>\n<function token>\n\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)\n ]\n return np.vstack(list_of_tensors)\n\n\n<import token>\n<assignment token>\n<import token>\n\n\ndef extract_Resnet50(file_paths):\n tensors = paths_to_tensor(file_paths).astype('float32')\n preprocessed_input = preprocess_input_resnet50(tensors)\n return ResNet50(weights='imagenet', include_top=False).predict(\n preprocessed_input, batch_size=32)\n\n\n<assignment token>\n<code token>\n<import token>\n\n\ndef input_branch(input_shape=None):\n size = int(input_shape[2] / 4)\n branch_input = Input(shape=input_shape)\n branch = GlobalAveragePooling2D()(branch_input)\n branch = Dense(size, use_bias=False, kernel_initializer='uniform')(branch)\n branch = BatchNormalization()(branch)\n branch = Activation('relu')(branch)\n return branch, branch_input\n\n\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<import token>\n\n\ndef save_test_results(test_files, true_path, false_path):\n pathlib.Path(true_path).mkdir(parents=True, exist_ok=True)\n pathlib.Path(false_path).mkdir(parents=True, exist_ok=True)\n class_encoding = {(0): 'Fishing', (1): 'Cargo', (2): 'Tanker'}\n for i, img in tqdm(enumerate(test_files)):\n try:\n imname = img.split('/')[-1]\n im = cv2.imread(img)\n cv2.putText(im, 'Prediction: {} True: {}'.format(class_encoding\n [class_predictions[i]], class_encoding[class_true_labels[i]\n ]), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)\n cv2.imwrite(os.path.join(true_path, imname), im\n ) if class_predictions[i] == class_true_labels[i\n ] else cv2.imwrite(os.path.join(false_path, imname), im)\n except:\n pass\n\n\n<code token>\n<import token>\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title=\n 'Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',\n color='white' if cm[i, j] > thresh else 'black')\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('confusion_matrix.png')\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<import token>\n<function token>\n\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)\n ]\n return np.vstack(list_of_tensors)\n\n\n<import token>\n<assignment token>\n<import token>\n\n\ndef extract_Resnet50(file_paths):\n tensors = paths_to_tensor(file_paths).astype('float32')\n preprocessed_input = preprocess_input_resnet50(tensors)\n return ResNet50(weights='imagenet', include_top=False).predict(\n preprocessed_input, batch_size=32)\n\n\n<assignment token>\n<code token>\n<import token>\n\n\ndef input_branch(input_shape=None):\n size = int(input_shape[2] / 4)\n branch_input = Input(shape=input_shape)\n branch = GlobalAveragePooling2D()(branch_input)\n branch = Dense(size, use_bias=False, kernel_initializer='uniform')(branch)\n branch = BatchNormalization()(branch)\n branch = Activation('relu')(branch)\n return branch, branch_input\n\n\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<import token>\n\n\ndef save_test_results(test_files, true_path, false_path):\n pathlib.Path(true_path).mkdir(parents=True, exist_ok=True)\n pathlib.Path(false_path).mkdir(parents=True, exist_ok=True)\n class_encoding = {(0): 'Fishing', (1): 'Cargo', (2): 'Tanker'}\n for i, img in tqdm(enumerate(test_files)):\n try:\n imname = img.split('/')[-1]\n im = cv2.imread(img)\n cv2.putText(im, 'Prediction: {} True: {}'.format(class_encoding\n [class_predictions[i]], class_encoding[class_true_labels[i]\n ]), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)\n cv2.imwrite(os.path.join(true_path, imname), im\n ) if class_predictions[i] == class_true_labels[i\n ] else cv2.imwrite(os.path.join(false_path, imname), im)\n except:\n pass\n\n\n<code token>\n<import token>\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title=\n 'Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',\n color='white' if cm[i, j] > thresh else 'black')\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('confusion_matrix.png')\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<import token>\n<function token>\n\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)\n ]\n return np.vstack(list_of_tensors)\n\n\n<import token>\n<assignment token>\n<import token>\n\n\ndef extract_Resnet50(file_paths):\n tensors = paths_to_tensor(file_paths).astype('float32')\n preprocessed_input = preprocess_input_resnet50(tensors)\n return ResNet50(weights='imagenet', include_top=False).predict(\n preprocessed_input, batch_size=32)\n\n\n<assignment token>\n<code token>\n<import token>\n<function token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<import token>\n\n\ndef save_test_results(test_files, true_path, false_path):\n pathlib.Path(true_path).mkdir(parents=True, exist_ok=True)\n pathlib.Path(false_path).mkdir(parents=True, exist_ok=True)\n class_encoding = {(0): 'Fishing', (1): 'Cargo', (2): 'Tanker'}\n for i, img in tqdm(enumerate(test_files)):\n try:\n imname = img.split('/')[-1]\n im = cv2.imread(img)\n cv2.putText(im, 'Prediction: {} True: {}'.format(class_encoding\n [class_predictions[i]], class_encoding[class_true_labels[i]\n ]), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)\n cv2.imwrite(os.path.join(true_path, imname), im\n ) if class_predictions[i] == class_true_labels[i\n ] else cv2.imwrite(os.path.join(false_path, imname), im)\n except:\n pass\n\n\n<code token>\n<import token>\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title=\n 'Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',\n color='white' if cm[i, j] > thresh else 'black')\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('confusion_matrix.png')\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<import token>\n<function token>\n\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)\n ]\n return np.vstack(list_of_tensors)\n\n\n<import token>\n<assignment token>\n<import token>\n\n\ndef extract_Resnet50(file_paths):\n tensors = paths_to_tensor(file_paths).astype('float32')\n preprocessed_input = preprocess_input_resnet50(tensors)\n return ResNet50(weights='imagenet', include_top=False).predict(\n preprocessed_input, batch_size=32)\n\n\n<assignment token>\n<code token>\n<import token>\n<function token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<import token>\n<function token>\n<code token>\n<import token>\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title=\n 'Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.0\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',\n color='white' if cm[i, j] > thresh else 'black')\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('confusion_matrix.png')\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<import token>\n<function token>\n\n\ndef paths_to_tensor(img_paths):\n list_of_tensors = [path_to_tensor(img_path) for img_path in tqdm(img_paths)\n ]\n return np.vstack(list_of_tensors)\n\n\n<import token>\n<assignment token>\n<import token>\n\n\ndef extract_Resnet50(file_paths):\n tensors = paths_to_tensor(file_paths).astype('float32')\n preprocessed_input = preprocess_input_resnet50(tensors)\n return ResNet50(weights='imagenet', include_top=False).predict(\n preprocessed_input, batch_size=32)\n\n\n<assignment token>\n<code token>\n<import token>\n<function token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<import token>\n<function token>\n<code token>\n<import token>\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<import token>\n<function token>\n<function token>\n<import token>\n<assignment token>\n<import token>\n\n\ndef extract_Resnet50(file_paths):\n tensors = paths_to_tensor(file_paths).astype('float32')\n preprocessed_input = preprocess_input_resnet50(tensors)\n return ResNet50(weights='imagenet', include_top=False).predict(\n preprocessed_input, batch_size=32)\n\n\n<assignment token>\n<code token>\n<import token>\n<function token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<import token>\n<function token>\n<code token>\n<import token>\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<import token>\n<function token>\n<function token>\n<import token>\n<assignment token>\n<import token>\n<function token>\n<assignment token>\n<code token>\n<import token>\n<function token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n<import token>\n<function token>\n<code token>\n<import token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
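
The plot_confusion_matrix helper visible in the steps above pairs naturally with scikit-learn's confusion_matrix; a minimal sketch reusing the record's three ship classes (the label arrays are illustrative, not from the dataset):

import numpy as np
from sklearn.metrics import confusion_matrix

class_true_labels = np.array([0, 1, 2, 1, 0])   # hypothetical ground truth
class_predictions = np.array([0, 1, 1, 1, 0])   # hypothetical model output
cnf_matrix = confusion_matrix(class_true_labels, class_predictions)
plot_confusion_matrix(cnf_matrix, classes=['Fishing', 'Cargo', 'Tanker'],
                      normalize=True, title='Normalized confusion matrix')
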
99,555 |
bb94ad5b798acb124b7a5fe223280dfa84b3b55d
|
#import socket library
from random import randint
from socket import *
#set port to 12000
serverPort = 12000
#create server socket
print 'Setting up TCP Socket'
serverSocket = socket(AF_INET,SOCK_STREAM)
#make server listen to port 12000
serverSocket.bind(('',serverPort))
serverSocket.listen(1)
print 'SERVER_PORT={}'.format(serverPort)
#wait for a connection to the client
while 1:
connectionSocket, addr = serverSocket.accept()
#store client's string into a buffer
sentence = connectionSocket.recv(1024)
if int(sentence) == 13:
r_port = randint(1420,11000)
print 'Negotiation accepted, sending r_port',r_port
serverPort = r_port
connectionSocket.send(str(r_port))
print 'Setting up UDP socket'
serverSocket2 = socket(AF_INET, SOCK_DGRAM)
serverSocket2.bind(('', serverPort))
sentence, clientAddress = serverSocket2.recvfrom(2048)
print 'Reversing message...'
reversedMessage = sentence[::-1]
#send it back to client through socket
print 'Message sent.'
serverSocket2.sendto(reversedMessage, clientAddress)
connectionSocket.close()
|
[
"#import socket library\nfrom random import randint\nfrom socket import * \n#set port to 12000\nserverPort = 12000\n#create server socket \nprint 'Setting up TCP Socket'\nserverSocket = socket(AF_INET,SOCK_STREAM) \n#make server listen to port 12000\nserverSocket.bind(('',serverPort)) \nserverSocket.listen(1) \nprint 'SERVER_PORT={}'.format(serverPort)\n#wait for a connetion to client\nwhile 1: \n connectionSocket, addr = serverSocket.accept() \n #store client's string into a buffer\n sentence = connectionSocket.recv(1024)\n \n if int(sentence) == 13:\n r_port = randint(1420,11000)\n print 'Negotiation accepted, sending r_port',r_port\n serverPort = r_port\n connectionSocket.send(str(r_port))\n print 'Setting up UDP socket'\n serverSocket2 = socket(AF_INET, SOCK_DGRAM)\n serverSocket2.bind(('', serverPort))\n sentence, clientAddress = serverSocket2.recvfrom(2048) \n print 'Reversing message...'\n reversedMessage = sentence[::-1]\n #send it back to client through socket \n print 'Message sent.'\n serverSocket2.sendto(reversedMessage, clientAddress)\n connectionSocket.close() \n"
] | true |
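
A matching client sketch for the negotiation protocol above (localhost and the message text are assumptions; Python 2 string payloads, like the server):

from socket import *

clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect(('localhost', 12000))
clientSocket.send('13')                  # negotiation code the server checks for
r_port = int(clientSocket.recv(1024))    # server replies with the random UDP port

udpSocket = socket(AF_INET, SOCK_DGRAM)
udpSocket.sendto('hello world', ('localhost', r_port))
reversedMessage, serverAddress = udpSocket.recvfrom(2048)
print(reversedMessage)                   # 'dlrow olleh'
udpSocket.close()
clientSocket.close()
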
99,556 |
c82c2e17ce8c7d95817023cbfba5d5df7cf6b7f4
|
from django.contrib import admin
from orders.models import Order, OrderItem, Coupon
class OrderItemInline(admin.TabularInline):
model = OrderItem
raw_id_fields = ('product',)
@admin.register(Order)
class OrderAdd(admin.ModelAdmin):
list_display = ('id', 'user', 'created', 'modified', 'is_paid')
list_filter = ('is_paid',)
inlines = (OrderItemInline,)
@admin.register(Coupon)
class CouponAdmin(admin.ModelAdmin):
list_display = ('code', 'valid_from', 'valid_to', 'discount', 'is_active')
list_filter = ('is_active', 'valid_from', 'valid_to')
search_fields = ('code',)
|
[
"from django.contrib import admin\n\nfrom orders.models import Order, OrderItem, Coupon\n\n\nclass OrderItemInline(admin.TabularInline):\n model = OrderItem\n raw_id_fields = ('product',)\n\n\[email protected](Order)\nclass OrderAdd(admin.ModelAdmin):\n list_display = ('id', 'user', 'created', 'modified', 'is_paid')\n list_filter = ('is_paid',)\n inlines = (OrderItemInline,)\n\n\[email protected](Coupon)\nclass CouponAdmin(admin.ModelAdmin):\n list_display = ('code', 'valid_from', 'valid_to', 'discount', 'is_active')\n list_filter = ('is_active', 'valid_from', 'valid_to')\n search_fields = ('code',)\n",
"from django.contrib import admin\nfrom orders.models import Order, OrderItem, Coupon\n\n\nclass OrderItemInline(admin.TabularInline):\n model = OrderItem\n raw_id_fields = 'product',\n\n\[email protected](Order)\nclass OrderAdd(admin.ModelAdmin):\n list_display = 'id', 'user', 'created', 'modified', 'is_paid'\n list_filter = 'is_paid',\n inlines = OrderItemInline,\n\n\[email protected](Coupon)\nclass CouponAdmin(admin.ModelAdmin):\n list_display = 'code', 'valid_from', 'valid_to', 'discount', 'is_active'\n list_filter = 'is_active', 'valid_from', 'valid_to'\n search_fields = 'code',\n",
"<import token>\n\n\nclass OrderItemInline(admin.TabularInline):\n model = OrderItem\n raw_id_fields = 'product',\n\n\[email protected](Order)\nclass OrderAdd(admin.ModelAdmin):\n list_display = 'id', 'user', 'created', 'modified', 'is_paid'\n list_filter = 'is_paid',\n inlines = OrderItemInline,\n\n\[email protected](Coupon)\nclass CouponAdmin(admin.ModelAdmin):\n list_display = 'code', 'valid_from', 'valid_to', 'discount', 'is_active'\n list_filter = 'is_active', 'valid_from', 'valid_to'\n search_fields = 'code',\n",
"<import token>\n\n\nclass OrderItemInline(admin.TabularInline):\n <assignment token>\n <assignment token>\n\n\[email protected](Order)\nclass OrderAdd(admin.ModelAdmin):\n list_display = 'id', 'user', 'created', 'modified', 'is_paid'\n list_filter = 'is_paid',\n inlines = OrderItemInline,\n\n\[email protected](Coupon)\nclass CouponAdmin(admin.ModelAdmin):\n list_display = 'code', 'valid_from', 'valid_to', 'discount', 'is_active'\n list_filter = 'is_active', 'valid_from', 'valid_to'\n search_fields = 'code',\n",
"<import token>\n<class token>\n\n\[email protected](Order)\nclass OrderAdd(admin.ModelAdmin):\n list_display = 'id', 'user', 'created', 'modified', 'is_paid'\n list_filter = 'is_paid',\n inlines = OrderItemInline,\n\n\[email protected](Coupon)\nclass CouponAdmin(admin.ModelAdmin):\n list_display = 'code', 'valid_from', 'valid_to', 'discount', 'is_active'\n list_filter = 'is_active', 'valid_from', 'valid_to'\n search_fields = 'code',\n",
"<import token>\n<class token>\n\n\[email protected](Order)\nclass OrderAdd(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\[email protected](Coupon)\nclass CouponAdmin(admin.ModelAdmin):\n list_display = 'code', 'valid_from', 'valid_to', 'discount', 'is_active'\n list_filter = 'is_active', 'valid_from', 'valid_to'\n search_fields = 'code',\n",
"<import token>\n<class token>\n<class token>\n\n\[email protected](Coupon)\nclass CouponAdmin(admin.ModelAdmin):\n list_display = 'code', 'valid_from', 'valid_to', 'discount', 'is_active'\n list_filter = 'is_active', 'valid_from', 'valid_to'\n search_fields = 'code',\n",
"<import token>\n<class token>\n<class token>\n\n\[email protected](Coupon)\nclass CouponAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n"
] | false |
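
The admin above implies roughly these models; a minimal sketch where everything beyond the fields referenced in list_display/raw_id_fields is an assumption (including the Product app label):

from django.conf import settings
from django.db import models


class Order(models.Model):
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    is_paid = models.BooleanField(default=False)


class OrderItem(models.Model):
    order = models.ForeignKey(Order, related_name='items', on_delete=models.CASCADE)
    product = models.ForeignKey('products.Product', on_delete=models.CASCADE)  # app label assumed


class Coupon(models.Model):
    code = models.CharField(max_length=50, unique=True)
    valid_from = models.DateTimeField()
    valid_to = models.DateTimeField()
    discount = models.IntegerField()
    is_active = models.BooleanField(default=True)
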
99,557 |
b457d1dfd437d64dfa649007b800f4dd681565ed
|
import json
import copy
filename = '../data/rotterdam/cityjson/t.json'
fin = open(filename)
j = json.loads(fin.read())
j2 = copy.deepcopy(j)
j2['metadata']['referenceSystem'] = 'urn:ogc:def:crs:EPSG::7415'
j2['@context'] = []
j2['@context'].append("http://localhost:8080/contexts/context_imgeo.jsonld")
j2['@context'].append("http://localhost:8080/contexts/context_cityjson.jsonld")
uids = set()
for cid in j['CityObjects']:
pos = cid.rfind('_')
uids.add(cid[:pos])
# print(uids)
for uid in uids:
j2['CityObjects'][uid + '_k']['geometry'].append(j2['CityObjects'][uid + '_s']['geometry'][0])
j2['CityObjects'][uid + '_k']['geometry'].append(j2['CityObjects'][uid + '_w']['geometry'][0])
j2['CityObjects'][uid + '_k']['type'] = 'SolitaryVegetationObject'
j2['CityObjects'][uid + '_k']['attributes']['class'] = {}
j2['CityObjects'][uid + '_k']['attributes']['class'] = "VegetatieObject"
j2['CityObjects'][uid + '_k']['attributes']['function'] = "Boom"
del j2['CityObjects'][uid + '_s']
del j2['CityObjects'][uid + '_w']
json_str = json.dumps(j2)
fout = open('../data/rotterdam/cityjson/t_nl3d.json', 'w')
fout.write(json_str)
print('Done.')
|
[
"\nimport json\nimport uuid\nimport copy\n\nfilename = '../data/rotterdam/cityjson/t.json'\nfin = open(filename)\nj = json.loads(fin.read())\nj2 = copy.deepcopy(j)\n\n\n\nj2['metadata']['referenceSystem'] = 'urn:ogc:def:crs:EPSG::7415'\n\nj2['@context'] = []\nj2['@context'].append(\"http://localhost:8080/contexts/context_imgeo.jsonld\")\nj2['@context'].append(\"http://localhost:8080/contexts/context_cityjson.jsonld\")\n\nuids = set()\nfor cid in j['CityObjects']:\n pos = cid.rfind('_')\n uids.add(cid[:pos])\n# print(uids)\n\nfor uid in uids:\n j2['CityObjects'][uid + '_k']['geometry'].append(j2['CityObjects'][uid + '_s']['geometry'][0])\n j2['CityObjects'][uid + '_k']['geometry'].append(j2['CityObjects'][uid + '_w']['geometry'][0])\n j2['CityObjects'][uid + '_k']['type'] = 'SolitaryVegetationObject'\n j2['CityObjects'][uid + '_k']['attributes']['class'] = {}\n j2['CityObjects'][uid + '_k']['attributes']['class'] = \"VegetatieObject\"\n j2['CityObjects'][uid + '_k']['attributes']['function'] = \"Boom\"\n del j2['CityObjects'][uid + '_s']\n del j2['CityObjects'][uid + '_w']\n\njson_str = json.dumps(j2)\nfout = open('../data/rotterdam/cityjson/t_nl3d.json', 'w')\nfout.write(json_str)\nprint('Done.')\n\n",
"import json\nimport uuid\nimport copy\nfilename = '../data/rotterdam/cityjson/t.json'\nfin = open(filename)\nj = json.loads(fin.read())\nj2 = copy.deepcopy(j)\nj2['metadata']['referenceSystem'] = 'urn:ogc:def:crs:EPSG::7415'\nj2['@context'] = []\nj2['@context'].append('http://localhost:8080/contexts/context_imgeo.jsonld')\nj2['@context'].append('http://localhost:8080/contexts/context_cityjson.jsonld')\nuids = set()\nfor cid in j['CityObjects']:\n pos = cid.rfind('_')\n uids.add(cid[:pos])\nfor uid in uids:\n j2['CityObjects'][uid + '_k']['geometry'].append(j2['CityObjects'][uid +\n '_s']['geometry'][0])\n j2['CityObjects'][uid + '_k']['geometry'].append(j2['CityObjects'][uid +\n '_w']['geometry'][0])\n j2['CityObjects'][uid + '_k']['type'] = 'SolitaryVegetationObject'\n j2['CityObjects'][uid + '_k']['attributes']['class'] = {}\n j2['CityObjects'][uid + '_k']['attributes']['class'] = 'VegetatieObject'\n j2['CityObjects'][uid + '_k']['attributes']['function'] = 'Boom'\n del j2['CityObjects'][uid + '_s']\n del j2['CityObjects'][uid + '_w']\njson_str = json.dumps(j2)\nfout = open('../data/rotterdam/cityjson/t_nl3d.json', 'w')\nfout.write(json_str)\nprint('Done.')\n",
"<import token>\nfilename = '../data/rotterdam/cityjson/t.json'\nfin = open(filename)\nj = json.loads(fin.read())\nj2 = copy.deepcopy(j)\nj2['metadata']['referenceSystem'] = 'urn:ogc:def:crs:EPSG::7415'\nj2['@context'] = []\nj2['@context'].append('http://localhost:8080/contexts/context_imgeo.jsonld')\nj2['@context'].append('http://localhost:8080/contexts/context_cityjson.jsonld')\nuids = set()\nfor cid in j['CityObjects']:\n pos = cid.rfind('_')\n uids.add(cid[:pos])\nfor uid in uids:\n j2['CityObjects'][uid + '_k']['geometry'].append(j2['CityObjects'][uid +\n '_s']['geometry'][0])\n j2['CityObjects'][uid + '_k']['geometry'].append(j2['CityObjects'][uid +\n '_w']['geometry'][0])\n j2['CityObjects'][uid + '_k']['type'] = 'SolitaryVegetationObject'\n j2['CityObjects'][uid + '_k']['attributes']['class'] = {}\n j2['CityObjects'][uid + '_k']['attributes']['class'] = 'VegetatieObject'\n j2['CityObjects'][uid + '_k']['attributes']['function'] = 'Boom'\n del j2['CityObjects'][uid + '_s']\n del j2['CityObjects'][uid + '_w']\njson_str = json.dumps(j2)\nfout = open('../data/rotterdam/cityjson/t_nl3d.json', 'w')\nfout.write(json_str)\nprint('Done.')\n",
"<import token>\n<assignment token>\nj2['@context'].append('http://localhost:8080/contexts/context_imgeo.jsonld')\nj2['@context'].append('http://localhost:8080/contexts/context_cityjson.jsonld')\n<assignment token>\nfor cid in j['CityObjects']:\n pos = cid.rfind('_')\n uids.add(cid[:pos])\nfor uid in uids:\n j2['CityObjects'][uid + '_k']['geometry'].append(j2['CityObjects'][uid +\n '_s']['geometry'][0])\n j2['CityObjects'][uid + '_k']['geometry'].append(j2['CityObjects'][uid +\n '_w']['geometry'][0])\n j2['CityObjects'][uid + '_k']['type'] = 'SolitaryVegetationObject'\n j2['CityObjects'][uid + '_k']['attributes']['class'] = {}\n j2['CityObjects'][uid + '_k']['attributes']['class'] = 'VegetatieObject'\n j2['CityObjects'][uid + '_k']['attributes']['function'] = 'Boom'\n del j2['CityObjects'][uid + '_s']\n del j2['CityObjects'][uid + '_w']\n<assignment token>\nfout.write(json_str)\nprint('Done.')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
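
Schematically, the merge loop above collapses the three per-tree CityObjects into one SolitaryVegetationObject; an illustrative before/after with string placeholders for the geometry objects (UUID 'u' is hypothetical):

before = {'u_k': {'geometry': ['g_k'], 'attributes': {}},
          'u_s': {'geometry': ['g_s']},
          'u_w': {'geometry': ['g_w']}}
# after the loop body runs for uid 'u', only 'u_k' remains:
after = {'u_k': {'type': 'SolitaryVegetationObject',
                 'geometry': ['g_k', 'g_s', 'g_w'],
                 'attributes': {'class': 'VegetatieObject', 'function': 'Boom'}}}
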
99,558 |
da4023649130fef05c6627ac6caecfb71fe316b9
|
#!/usr/bin/env python
import numpy as np
from ImageTools import cutouts
from pygoods import sextractor, Ftable
drz_u = '/Users/khuang/CANDELS/goodss/mosaics/vimos_u/tfit_015_025_sqr_6_bg1.fits'
drz_f435w = '/Users/khuang/CANDELS/goodss/mosaics/goods_s_acs_v3/gs_presm4_all_acs_f435w_60mas_v3.0_drz.fits'
drz_f606w = '/Users/khuang/CANDELS/goodss/mosaics/goods_s_acs_v3/gs_presm4_all_acs_f606w_60mas_v3.0_drz.fits'
drz_f098m = '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_candels_ers_f098m_060mas_v0.5_drz.fits'
drz_f105w = '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_candels_ers_udf_f105w_060mas_v0.5_drz.fits'
drz_f160w = '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_candels_ers_udf_f160w_v0.5_drz.fits'
seg_f160w = '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_sx_h_120604_hphotom_comb_seg_psfmatch2h.fits'
images = [drz_f435w, drz_f606w, drz_f098m, drz_f105w, drz_f160w, seg_f160w]
filters = ['f435w', 'f606w', 'f098m', 'f105w', 'f160w', 'seg_f160w']
catalog_udrops = '/Users/khuang/Dropbox/Research/bivariate/udrops_sample/gds_udrops_all_140313.fits'
class UdropsCutouts(cutouts.Cutouts):
def __init__(self, images=images, filters=filters, catalog=catalog_udrops, format='fits', objid='objid'):
super(UdropsCutouts, self).__init__(images, filters)
self.use_catalog(catalog, format=format, objid=objid)
def use_catalog(self, catalog, objid='id', ra='ra', dec='dec', format='fits'):
if format.lower() == 'fits':
self.c = Ftable(catalog)
self.Nc = len(self.c.d)
else:
self.c = sextractor(catalog)
self.Nc = len(self.c)
self.objid = getattr(self.c, objid)
self.ra = getattr(self.c, ra)
self.dec = getattr(self.c, dec)
def cut_objid(self, objid, width):
# Make cutouts using object ID.
assert objid in self.objid, "Object ID %d not found." % objid
ra = self.ra[self.objid==objid][0]
dec = self.dec[self.objid==objid][0]
name = 'obj%d' % objid
self.cut_radec_all(ra, dec, self.filters, width, name, norm=False)
|
[
"#!/usr/bin/env python\n\nimport numpy as np\nfrom ImageTools import cutouts\nfrom pygoods import sextractor, Ftable\n\ndrz_u = '/Users/khuang/CANDELS/goodss/mosaics/vimos_u/tfit_015_025_sqr_6_bg1.fits'\ndrz_f435w = '/Users/khuang/CANDELS/goodss/mosaics/goods_s_acs_v3/gs_presm4_all_acs_f435w_60mas_v3.0_drz.fits'\ndrz_f606w = '/Users/khuang/CANDELS/goodss/mosaics/goods_s_acs_v3/gs_presm4_all_acs_f606w_60mas_v3.0_drz.fits'\ndrz_f098m = '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_candels_ers_f098m_060mas_v0.5_drz.fits'\ndrz_f105w = '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_candels_ers_udf_f105w_060mas_v0.5_drz.fits'\ndrz_f160w = '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_candels_ers_udf_f160w_v0.5_drz.fits'\nseg_f160w = '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_sx_h_120604_hphotom_comb_seg_psfmatch2h.fits'\nimages = [drz_f435w, drz_f606w, drz_f098m, drz_f105w, drz_f160w, seg_f160w]\nfilters = ['f435w', 'f606w', 'f098m', 'f105w', 'f160w', 'seg_f160w']\ncatalog_udrops = '/Users/khuang/Dropbox/Research/bivariate/udrops_sample/gds_udrops_all_140313.fits'\n\nclass UdropsCutouts(cutouts.Cutouts):\n def __init__(self, images=images, filters=filters, catalog=catalog_udrops, format='fits', objid='objid'):\n super(UdropsCutouts, self).__init__(images, filters)\n self.use_catalog(catalog, format=format, objid=objid)\n\n def use_catalog(self, catalog, objid='id', ra='ra', dec='dec', format='fits'):\n if format.lower() == 'fits':\n self.c = Ftable(catalog)\n self.Nc = len(self.c.d)\n else:\n self.c = sextractor(catalog)\n self.Nc = len(self.c)\n self.objid = getattr(self.c, objid)\n self.ra = getattr(self.c, ra)\n self.dec = getattr(self.c, dec)\n\n def cut_objid(self, objid, width):\n # Make cutouts using object ID.\n assert objid in self.objid, \"Object ID %d not found.\" % objid\n ra = self.ra[self.objid==objid][0]\n dec = self.dec[self.objid==objid][0]\n name = 'obj%d' % objid\n self.cut_radec_all(ra, dec, self.filters, width, name, norm=False)\n",
"import numpy as np\nfrom ImageTools import cutouts\nfrom pygoods import sextractor, Ftable\ndrz_u = (\n '/Users/khuang/CANDELS/goodss/mosaics/vimos_u/tfit_015_025_sqr_6_bg1.fits')\ndrz_f435w = (\n '/Users/khuang/CANDELS/goodss/mosaics/goods_s_acs_v3/gs_presm4_all_acs_f435w_60mas_v3.0_drz.fits'\n )\ndrz_f606w = (\n '/Users/khuang/CANDELS/goodss/mosaics/goods_s_acs_v3/gs_presm4_all_acs_f606w_60mas_v3.0_drz.fits'\n )\ndrz_f098m = (\n '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_candels_ers_f098m_060mas_v0.5_drz.fits'\n )\ndrz_f105w = (\n '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_candels_ers_udf_f105w_060mas_v0.5_drz.fits'\n )\ndrz_f160w = (\n '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_candels_ers_udf_f160w_v0.5_drz.fits'\n )\nseg_f160w = (\n '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_sx_h_120604_hphotom_comb_seg_psfmatch2h.fits'\n )\nimages = [drz_f435w, drz_f606w, drz_f098m, drz_f105w, drz_f160w, seg_f160w]\nfilters = ['f435w', 'f606w', 'f098m', 'f105w', 'f160w', 'seg_f160w']\ncatalog_udrops = (\n '/Users/khuang/Dropbox/Research/bivariate/udrops_sample/gds_udrops_all_140313.fits'\n )\n\n\nclass UdropsCutouts(cutouts.Cutouts):\n\n def __init__(self, images=images, filters=filters, catalog=\n catalog_udrops, format='fits', objid='objid'):\n super(UdropsCutouts, self).__init__(images, filters)\n self.use_catalog(catalog, format=format, objid=objid)\n\n def use_catalog(self, catalog, objid='id', ra='ra', dec='dec', format=\n 'fits'):\n if format.lower() == 'fits':\n self.c = Ftable(catalog)\n self.Nc = len(self.c.d)\n else:\n self.c = sextractor(catalog)\n self.Nc = len(self.c)\n self.objid = getattr(self.c, objid)\n self.ra = getattr(self.c, ra)\n self.dec = getattr(self.c, dec)\n\n def cut_objid(self, objid, width):\n assert objid in self.objid, 'Object ID %d not found.' % objid\n ra = self.ra[self.objid == objid][0]\n dec = self.dec[self.objid == objid][0]\n name = 'obj%d' % objid\n self.cut_radec_all(ra, dec, self.filters, width, name, norm=False)\n",
"<import token>\ndrz_u = (\n '/Users/khuang/CANDELS/goodss/mosaics/vimos_u/tfit_015_025_sqr_6_bg1.fits')\ndrz_f435w = (\n '/Users/khuang/CANDELS/goodss/mosaics/goods_s_acs_v3/gs_presm4_all_acs_f435w_60mas_v3.0_drz.fits'\n )\ndrz_f606w = (\n '/Users/khuang/CANDELS/goodss/mosaics/goods_s_acs_v3/gs_presm4_all_acs_f606w_60mas_v3.0_drz.fits'\n )\ndrz_f098m = (\n '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_candels_ers_f098m_060mas_v0.5_drz.fits'\n )\ndrz_f105w = (\n '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_candels_ers_udf_f105w_060mas_v0.5_drz.fits'\n )\ndrz_f160w = (\n '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_candels_ers_udf_f160w_v0.5_drz.fits'\n )\nseg_f160w = (\n '/Users/khuang/CANDELS/goodss/mosaics/all_combined_v0.5/gs_all_sx_h_120604_hphotom_comb_seg_psfmatch2h.fits'\n )\nimages = [drz_f435w, drz_f606w, drz_f098m, drz_f105w, drz_f160w, seg_f160w]\nfilters = ['f435w', 'f606w', 'f098m', 'f105w', 'f160w', 'seg_f160w']\ncatalog_udrops = (\n '/Users/khuang/Dropbox/Research/bivariate/udrops_sample/gds_udrops_all_140313.fits'\n )\n\n\nclass UdropsCutouts(cutouts.Cutouts):\n\n def __init__(self, images=images, filters=filters, catalog=\n catalog_udrops, format='fits', objid='objid'):\n super(UdropsCutouts, self).__init__(images, filters)\n self.use_catalog(catalog, format=format, objid=objid)\n\n def use_catalog(self, catalog, objid='id', ra='ra', dec='dec', format=\n 'fits'):\n if format.lower() == 'fits':\n self.c = Ftable(catalog)\n self.Nc = len(self.c.d)\n else:\n self.c = sextractor(catalog)\n self.Nc = len(self.c)\n self.objid = getattr(self.c, objid)\n self.ra = getattr(self.c, ra)\n self.dec = getattr(self.c, dec)\n\n def cut_objid(self, objid, width):\n assert objid in self.objid, 'Object ID %d not found.' % objid\n ra = self.ra[self.objid == objid][0]\n dec = self.dec[self.objid == objid][0]\n name = 'obj%d' % objid\n self.cut_radec_all(ra, dec, self.filters, width, name, norm=False)\n",
"<import token>\n<assignment token>\n\n\nclass UdropsCutouts(cutouts.Cutouts):\n\n def __init__(self, images=images, filters=filters, catalog=\n catalog_udrops, format='fits', objid='objid'):\n super(UdropsCutouts, self).__init__(images, filters)\n self.use_catalog(catalog, format=format, objid=objid)\n\n def use_catalog(self, catalog, objid='id', ra='ra', dec='dec', format=\n 'fits'):\n if format.lower() == 'fits':\n self.c = Ftable(catalog)\n self.Nc = len(self.c.d)\n else:\n self.c = sextractor(catalog)\n self.Nc = len(self.c)\n self.objid = getattr(self.c, objid)\n self.ra = getattr(self.c, ra)\n self.dec = getattr(self.c, dec)\n\n def cut_objid(self, objid, width):\n assert objid in self.objid, 'Object ID %d not found.' % objid\n ra = self.ra[self.objid == objid][0]\n dec = self.dec[self.objid == objid][0]\n name = 'obj%d' % objid\n self.cut_radec_all(ra, dec, self.filters, width, name, norm=False)\n",
"<import token>\n<assignment token>\n\n\nclass UdropsCutouts(cutouts.Cutouts):\n <function token>\n\n def use_catalog(self, catalog, objid='id', ra='ra', dec='dec', format=\n 'fits'):\n if format.lower() == 'fits':\n self.c = Ftable(catalog)\n self.Nc = len(self.c.d)\n else:\n self.c = sextractor(catalog)\n self.Nc = len(self.c)\n self.objid = getattr(self.c, objid)\n self.ra = getattr(self.c, ra)\n self.dec = getattr(self.c, dec)\n\n def cut_objid(self, objid, width):\n assert objid in self.objid, 'Object ID %d not found.' % objid\n ra = self.ra[self.objid == objid][0]\n dec = self.dec[self.objid == objid][0]\n name = 'obj%d' % objid\n self.cut_radec_all(ra, dec, self.filters, width, name, norm=False)\n",
"<import token>\n<assignment token>\n\n\nclass UdropsCutouts(cutouts.Cutouts):\n <function token>\n <function token>\n\n def cut_objid(self, objid, width):\n assert objid in self.objid, 'Object ID %d not found.' % objid\n ra = self.ra[self.objid == objid][0]\n dec = self.dec[self.objid == objid][0]\n name = 'obj%d' % objid\n self.cut_radec_all(ra, dec, self.filters, width, name, norm=False)\n",
"<import token>\n<assignment token>\n\n\nclass UdropsCutouts(cutouts.Cutouts):\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
99,559 |
b4c5716ba138f7b54e42d1aba0548113cc7eda16
|
from flask import Flask
from flaskext.mysql import MySQL
app = Flask(__name__)
app.config['MYSQL_DATABASE_HOST'] = 'KyrylS.mysql.pythonanywhere-services.com'
app.config['MYSQL_DATABASE_USER'] = 'KyrylS'
app.config['MYSQL_DATABASE_PASSWORD'] = 'Stukalov12'
app.config['MYSQL_DATABASE_DB'] = 'KyrylS$hrdeb_db'
mysql = MySQL()
mysql.init_app(app)
|
[
"from flask import Flask\nfrom flaskext.mysql import MySQL\n\n\napp = Flask(__name__)\n\napp.config['MYSQL_DATABASE_HOST'] = 'KyrylS.mysql.pythonanywhere-services.com'\napp.config['MYSQL_DATABASE_USER'] = 'KyrylS'\napp.config['MYSQL_DATABASE_PASSWORD'] = 'Stukalov12'\napp.config['MYSQL_DATABASE_DB'] = 'KyrylS$hrdeb_db'\n\n\nmysql = MySQL()\nmysql.init_app(app)",
"from flask import Flask\nfrom flaskext.mysql import MySQL\napp = Flask(__name__)\napp.config['MYSQL_DATABASE_HOST'] = 'KyrylS.mysql.pythonanywhere-services.com'\napp.config['MYSQL_DATABASE_USER'] = 'KyrylS'\napp.config['MYSQL_DATABASE_PASSWORD'] = 'Stukalov12'\napp.config['MYSQL_DATABASE_DB'] = 'KyrylS$hrdeb_db'\nmysql = MySQL()\nmysql.init_app(app)\n",
"<import token>\napp = Flask(__name__)\napp.config['MYSQL_DATABASE_HOST'] = 'KyrylS.mysql.pythonanywhere-services.com'\napp.config['MYSQL_DATABASE_USER'] = 'KyrylS'\napp.config['MYSQL_DATABASE_PASSWORD'] = 'Stukalov12'\napp.config['MYSQL_DATABASE_DB'] = 'KyrylS$hrdeb_db'\nmysql = MySQL()\nmysql.init_app(app)\n",
"<import token>\n<assignment token>\nmysql.init_app(app)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
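
With the extension initialised as above, a view would typically open a connection via mysql.connect() (standard Flask-MySQL usage; the route below is an assumption):

@app.route('/ping_db')
def ping_db():
    conn = mysql.connect()
    cursor = conn.cursor()
    cursor.execute('SELECT VERSION()')
    version = cursor.fetchone()
    conn.close()
    return str(version)
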
99,560 |
f709306bf36ddff74178e479430e85e1f28f524a
|
import warnings
from collections import Counter
import numpy as np
import re
import itertools as it
from scipy.sparse import issparse, hstack
from pandas import DataFrame
from sklearn.utils import check_random_state, check_array
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from sklearn.utils.validation import check_is_fitted, check_array, check_X_y
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from xgboost.sklearn import XGBClassifier
from custom_transformers import LabelOneHotEncoder
from sklearn.preprocessing import OneHotEncoder, RobustScaler
from sklearn.linear_model import LogisticRegression
class FriedScaler(BaseEstimator, TransformerMixin):
"""FriedScaler class: Scale linear features within rule ensemble
Scales linear features within a rule ensemble
to have the same weighting as a rule according to
Friedman et al. 2005 Section 5.
Each column, $x_i$ is winsorised at `quantile` -> $x_i'$, then
    standardised by multiplying by $0.4/\text{std}(x_i')$
Attributes
----------
scale: numpy.ndarray
scale factor for each variable
lower: numpy.ndarray
lower winsorisation threshold
upper: numpy.ndarray
upper winsorisation threshold
"""
def __init__(self, quantile=0.0):
"""
Parameters
----------
quantile: float
float in [0, 0.5) signifying the quantiles at which to winsorise
(`quantile` and `1-quantile`)
WARNING: If data has small variance then this may need to be
very small to avoid blowing up of scale factors
"""
self.quantile = quantile
def fit(self, X, y=None):
""" Fit scaler and return self
Winsorise `X` at `quantile` and `1-quantile`.
Scale each variable (as long as they aren't binary in
which case they are already rules).
Parameters
----------
X: numpy.ndarray
Co-variates
        y: dummy argument, optional
"""
self.fit_transform(X, y)
return self
def fit_transform(self, X, y=None):
""" Fit scaler and transform input data
Winsorise `X` at `quantile` and `1-quantile`.
Scale each variable (as long as they aren't binary in
which case they are already rules).
Parameters
----------
X: numpy.ndarray
Co-variates
        y: dummy argument, optional
"""
self.scale = np.ones(X.shape[1])
self.lower = np.percentile(X, self.quantile*100, axis=0)
self.upper = np.percentile(X, (1-self.quantile)*100, axis=0)
# Winsorize at `self.quantile`
winX = X.copy()
is_lower = (winX < self.lower)
is_higher = (winX > self.upper)
for col in range(X.shape[1]):
winX[is_lower[:, col], col] = self.lower[col]
winX[is_higher[:, col], col] = self.upper[col]
num_uniq = np.unique(X[:, col]).size
if num_uniq > 2: # Don't scale binary vars
self.scale[col] = 0.4/(1e-12 + np.std(winX[:, col]))
large_scale = np.where(self.scale > 1e3)[0]
if large_scale.size > 0:
warnings.warn('Scales of {} are larger than 1e3!'.format(large_scale))
return winX*self.scale
def transform(self, X):
""" Transform input data
Winsorise `X` at pre-fitted `quantile` and `1-quantile`.
Scale each variable (as long as they aren't binary in
        which case they are already rules) according to the already
fitted scale factors.
Parameters
----------
X: numpy.ndarray
Co-variates
        y: dummy argument, optional
"""
winX = X.copy()
is_lower = (winX <= self.lower)
is_higher = (winX >= self.upper)
for col in range(X.shape[1]):
winX[is_lower[:, col], col] = self.lower[col]
winX[is_higher[:, col], col] = self.upper[col]
return winX*self.scale
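
# Illustrative numbers for the scaling above: a column whose winsorised std is
# 2.0 gets scale 0.4/2.0 = 0.2, so a typical step in that feature carries
# roughly the same a priori weight as a binary rule; binary columns keep scale 1.
# A minimal usage sketch (X / X_new are assumed 2-D numpy arrays):
#
#   fs = FriedScaler(quantile=0.025)
#   Xs = fs.fit_transform(X)    # fit winsorisation bounds and scale factors
#   fs.transform(X_new)         # reuse the fitted bounds on new data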
class RuleFitClassifier(BaseEstimator, ClassifierMixin):
"""Rule-Fit for binary classification
Generate an ensemble of rules using XGBoost or a sklearn
tree ensemble method, and use these (optionally with linear
features) in a L1 (or other penalised) Logistic Regression to
build a classifier.
Attributes
----------
LR: sklearn.linear_model.LogisticRegression
Regularised linear regression on ensemble of rules
feature_mask_: np.ndarray
Array of non-zero feature values
coef_: np.ndarray
LogisticRegression (`LR`) co-efficients for features in `feature_mask_`
intercept_: np.ndarray
LogisticRegression (`LR`) intercept
features: np.ndarray of str
Input feature names
features_: np.ndarray of str
Output feature names of rule ensembles (and linear features if `linear_features=True`)
"""
def __init__(self,
base_estimator=XGBClassifier(),
linear_features=True,
linear_feature_quantile=0.025,
C=1e-1,
penalty='l1',
n_estimators=10,
max_depth=5,
rand_tree_size=False,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None,
ext_scaler=RobustScaler()):
"""
Parameters
----------
base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier
Estimator to generate rule ensemble with
linear_features: bool, default: True
If `True`: Use linear features as well as rules
linear_feature_quantile: float, default: 0.025
float in [0, 0.5) signifying the quantiles at which to winsorise
(`quantile` and `1-quantile`).
WARNING: If data has small variance then this may need to be
very small to avoid blowing up of scale factors
C: float, default: 0.1
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
penalty: {'l1', 'l2'}, default: 'l1'
Norm used in the regularisation for LogisticRegression
n_estimators: int, default: 10
Number of trees within `base_estimator`
max_depth: int, optional
Maximum tree depth of `base_estimator`
rand_tree_size: bool, optional
NOT YET IMPLEMENTED!
If `True`, randomise `max_depth` to get rules of varying lengths.
n_jobs: int, optional
The number of CPUs to use. -1 means 'all CPUs'.
verbose: int, optional
Increasing verbosity with number.
        warm_start: bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
        class_weight: dict or 'balanced', default: None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
ext_scaler: sklearn Transformer, optional
Scaling transformation to apply to linear features (before Friedman scaling)
"""
self.base_estimator = base_estimator
self.linear_features = linear_features
self.linear_feature_quantile = linear_feature_quantile
self.C = C
self.penalty = penalty
self.n_estimators = n_estimators
self.max_depth = max_depth
        self.rand_tree_size = rand_tree_size
        self.sparse_output = sparse_output  # stored for sklearn get_params/clone compatibility
        self.n_jobs = n_jobs
        self.random_state = random_state  # stored for sklearn get_params/clone compatibility
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
self.ext_scaler = ext_scaler
def fit(self, X, y, sample_weight=None):
""" Fit model to data
X: pandas.DataFrame or numpy.ndarray
Features
y: pandas.Series or numpy.ndarray
Target
Returns
-------
self
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def transform(self, X, y=None):
""" Transform data into modified features
(before being passed to penalised regression step).
If `linear_features=True` then this will be scaled linear features
followed by the one-hot-encoding signifying which rules are "on".
Otherwise this is just the one-hot-encoding signifying which rules are "on".
X: pandas.DataFrame or numpy.ndarray
Features
y: dummy, optional
Returns
-------
sparse array
"""
X = check_array(X) # Validate input data
X = self.ext_scaler.transform(X) # Scale and centre features
if self.linear_features:
X_scale = self._scaler.transform(X) # Scale linear features to give same a priori weight as rules
return hstack([X_scale, self._one_hot_encoder.transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))])
else:
return self._one_hot_encoder.transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))
def fit_transform(self, X, y, sample_weight=None):
""" Fit and Transform data into modified features
(before being passed to penalised regression step).
If `linear_features=True` then this will be scaled linear features
followed by the one-hot-encoding signifying which rules are "on".
Otherwise this is just the one-hot-encoding signifying which rules are "on".
        The fitting process fits a bagged/boosted tree model to generate rules
and then using these in a penalised logistic regression.
X: pandas.DataFrame or numpy.ndarray
Features
y: pandas.Series or numpy.ndarray
Target
Returns
-------
sparse array
"""
# Instantiate rule ensemble generator and set parameters
if isinstance(self.base_estimator, XGBClassifier):
            self.base_estimator.set_params(n_estimators=self.n_estimators, silent=(self.verbose == 0),
                                           max_depth=self.max_depth, n_jobs=self.n_jobs)
elif isinstance(self.base_estimator, RandomForestClassifier):
warnings.warn('This base_estimator implementation has not been tested in a while!')
self.base_estimator.set_params(n_estimators=self.n_estimators, verbose=self.verbose,
max_depth=self.max_depth, n_jobs=self.n_jobs)
elif isinstance(self.base_estimator, GradientBoostingClassifier):
warnings.warn('This base_estimator implementation has not been tested in a while!')
self.base_estimator.set_params(n_estimators=self.n_estimators, verbose=self.verbose,
max_depth=self.max_depth, n_jobs=self.n_jobs)
else:
raise NotImplementedError
# Name features
if isinstance(X, DataFrame):
self.features = X.columns.values
else:
self.features = ['f'+str(i) for i in range(X.shape[1])]
# Check input
X = check_array(X)
# Generate and extract rules
if not self.rand_tree_size:
self.base_estimator.fit(X, y, sample_weight=sample_weight)
if isinstance(self.base_estimator, XGBClassifier):
self._rule_dump = self.base_estimator._Booster.get_dump()
else:
            raise NotImplementedError()  # TODO: work out how to incrementally train XGB
if self.verbose > 0:
print('fitting trees')
# For each tree: get leaf numbers and map them to [0, num leaves]
# before one-hot encoding them
leaves_l = []
for tree_i in self._rule_dump:
leaves = [int(i) for i in re.findall(r'([0-9]+):leaf=', tree_i)]
leaves_l.append(leaves)
self._one_hot_encoder = LabelOneHotEncoder(leaves_l)
if self.verbose > 0:
print('setup encoding')
# Scale and centre linear features
X = self.ext_scaler.fit_transform(X)
if self.linear_features:
# Linear features must be scaled to have same weighting as an average rule
self._scaler = FriedScaler(quantile=self.linear_feature_quantile)
X_scale = self._scaler.fit_transform(X)
X_transform = hstack([X_scale, self._one_hot_encoder.fit_transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))])
else:
X_transform = self._one_hot_encoder.fit_transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))
if self.verbose > 0:
print('encoded')
# Fit sparse linear model to rules (and optionally linear features)
self.LR = LogisticRegression(C=self.C, penalty=self.penalty, class_weight=self.class_weight,
warm_start=self.warm_start, solver='saga', verbose=self.verbose)
self.LR.fit(X_transform, y, sample_weight=sample_weight)
if self.verbose > 0:
print('fitted')
# Mask features with zero co-efficients
# self.feature_mask_ = np.arange(self.LR.coef_.size)
self.feature_mask_ = self.LR.coef_.nonzero()[1]
self.coef_ = self.LR.coef_[0, self.feature_mask_]
self.intercept_ = self.LR.intercept_
self.get_feature_names()
assert self.features_.size == self.feature_mask_.size
return X_transform
def get_feature_names(self):
""" Get names of features in the model
Returns
-------
numpy.ndarray
"""
if self.linear_features:
self.features_ = np.concatenate([self.features, np.array(self.extract_rules(labels=self.features))], 0)[self.feature_mask_]
else:
self.features_ = np.array(self.extract_rules(labels=self.features))[self.feature_mask_]
return self.features_
def predict(self, X):
""" Output model prediction
Parameters
----------
X: pandas.DataFrame or numpy.ndarray
Returns
-------
np.ndarray
Bool predictions
"""
return self.LR.predict(self.transform(X))
def predict_proba(self, X):
""" Output model prediction probability
Parameters
----------
X: pandas.DataFrame or numpy.ndarray
Returns
-------
np.ndarray
Probabilistic predictions
"""
return self.LR.predict_proba(self.transform(X))
def __extract_xgb_dt_rules__(self, dt):
""" Extract rule set from single decision tree according
to `XGBClassifier` format
Parameters
----------
dt: string
Returns
-------
list of numpy.ndarray
Each array is of length three.
First indicates feature number,
            Second indicates operator (1 if $\leq$ otherwise $>$),
Third indicates threshold value
"""
md = self.max_depth + 1 # upper limit of max_depth?
rules = []
levels = np.zeros((md, 3)) # Stores: (feature name, threshold, next node id)
path = []
# Extract feature numbers and thresholds for all nodes
feat_thresh_l = re.findall(r'\[f([0-9]+)<([-]?[0-9]+\.?[0-9]*)\]', dt)
_id = 0
prune = -1
for line in dt.split('\n')[:-1]:
# Separate node id and rest of line
_id, rest = line.split(':')
# Count number of tabs at start of line to get level (and then remove)
level = Counter(_id)['\t']
_id = _id.lstrip()
if prune > 0:
# If we were last at a leaf, prune the path
path = path[:-1+(level-prune)]
# Add current node to path
path.append(int(_id))
if 'leaf' in rest:
prune = level # Store where we are so we can prune when we backtrack
rules.append(levels[:level, (0, 2, 1)].copy()) # Add rules
rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:]) # Convert path to geq/leq operators
else:
# Extract (feature name, threshold, next node id)
levels[level, :] = re.findall(r'\[f([0-9]+)<([-]?[0-9]+\.?[0-9]*)\].*yes=([0-9]+)', line)[0]
# Don't prune
prune = -1
return rules
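
    # Illustrative example on a hypothetical depth-1 dump string:
    #   '0:[f3<0.5] yes=1,no=2,missing=1\n\t1:leaf=0.1\n\t2:leaf=-0.1\n'
    # yields [array([[3., 1., 0.5]]), array([[3., 0., 0.5]])] -- feature 3,
    # operator 1 (yes/<= branch) or 0 (no/> branch), threshold 0.5.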
def __extract_dt_rules__(self, dt):
""" Extract rule set from single decision tree according
to sklearn binary-tree format
Parameters
----------
dt: string
Returns
-------
list of numpy.ndarray
Each array is of length three.
First indicates feature number,
            Second indicates operator (1 if $\leq$ otherwise $>$),
Third indicates threshold value
"""
t = dt.tree_ # Get tree object
rules = []
stack = [(0, -1, -1)] # (node id, parent depth, true[<=thresh]/false[>thresh] arm)
path = [(0, -1, -1)] # Begin path at root
while len(stack) > 0: # While nodes to visit is not empty
nid, pd, op = stack.pop() # Get next node id, path depth, operator
if (pd > path[-1][1]): # Going deeper
path.append((nid, pd, op))
elif pd == -1: # ROOT
pass
else: # Back-track
                for _ in range(path[-1][1] - pd + 1):  # pop back up to the shared ancestor
                    path.pop()
path.append((nid, pd, op))
if t.children_left[nid] > 0: # If not leaf, add children onto stack
stack.append((t.children_left[nid], pd + 1, 1))
stack.append((t.children_right[nid], pd + 1, 0))
else: # If leaf append rule
rules.append(np.array([(t.feature[path[i][0]], path[i+1][2], t.threshold[path[i][0]]) for i in range(len(path)-1)]))
return rules
def __convert_rule__(self, x, labels=None, scaler=None):
"""Convert rule represented by an array to readable format
Parameters
----------
x: numpy.ndarray
Input array where each row represents a feature in a rule.
3 columns:
First indicates feature number,
            Second indicates operator (1 if $\leq$ otherwise $>$),
Third indicates threshold value
labels: list of str, optional
Names of features to replace feature numbers with
scaler:
Scaler to reverse scaling done in fitting so interpretable
feature values can be used.
Returns
-------
list of str
List containing each stage of input rule
"""
strop = ['>', '<=']
if scaler is None:
# If no scaler, do not shift or scale
nf = x[:, 0].astype(int).max()+1
scale = np.ones(nf)
center = np.zeros(nf)
else:
scale = scaler.scale_
center = scaler.center_
if labels is None:
return [(str(int(f)) + str(strop[int(op)]) + str(thresh*scale[int(f)]+center[int(f)])) for f, op, thresh in x]
else:
return [(labels[int(f)] + str(strop[int(op)]) + str(thresh*scale[int(f)]+center[int(f)])) for f, op, thresh in x]
def extract_rules(self, labels=None):
"""Extract rules from `base_estimator`
Parameters
----------
labels: list of str, optional
Feature names
Returns
-------
numpy.ndarray
Containing `str` representing rules in ensembles
"""
# Extract flat list of rules in array form
if isinstance(self.base_estimator, RandomForestClassifier):
rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_]))
elif isinstance(self.base_estimator, GradientBoostingClassifier):
            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_.ravel()]))
elif isinstance(self.base_estimator, XGBClassifier):
rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for dt in self._rule_dump]))
# Convert each sub-rule into text, join together with '&' and then add to rules
self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=labels, scaler=self.ext_scaler)) for r in rules])
return self.rules
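
# A minimal end-to-end sketch (synthetic data; hyper-parameters illustrative):
#
#   from sklearn.datasets import make_classification
#   X, y = make_classification(n_samples=500, n_features=10, random_state=0)
#   clf = RuleFitClassifier(n_estimators=20, max_depth=3, C=0.1)
#   clf.fit(DataFrame(X, columns=['f%d' % i for i in range(10)]), y)
#   list(zip(clf.features_, clf.coef_))  # surviving rules/features and weights
#   clf.predict_proba(X)[:5]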
|
[
"import pdb\nimport warnings\n\nfrom collections import Counter\nimport numpy as np\nimport re\nimport itertools as it\nfrom scipy.sparse import issparse, hstack\nfrom pandas import DataFrame\n\nfrom sklearn.utils import check_random_state, check_array\nfrom sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin\nfrom sklearn.utils.validation import check_is_fitted, check_array, check_X_y\n\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom xgboost.sklearn import XGBClassifier\nfrom custom_transformers import LabelOneHotEncoder\nfrom sklearn.preprocessing import OneHotEncoder, RobustScaler\nfrom sklearn.linear_model import LogisticRegression\n\n\nclass FriedScaler(BaseEstimator, TransformerMixin):\n \"\"\"FriedScaler class: Scale linear features within rule ensemble\n \n Scales linear features within a rule ensemble\n to have the same weighting as a rule according to\n Friedman et al. 2005 Section 5.\n \n Each column, $x_i$ is winsorised at `quantile` -> $x_i'$, then \n standardised by multiplying by $0.4 \\text{std}(x_i')$\n \n Attributes\n ----------\n \n scale: numpy.ndarray \n scale factor for each variable\n \n lower: numpy.ndarray\n lower winsorisation threshold\n \n upper: numpy.ndarray\n upper winsorisation threshold\n \n \"\"\"\n \n def __init__(self, quantile=0.0):\n \"\"\"\n Parameters\n ----------\n \n quantile: float\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`)\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \"\"\"\n self.quantile = quantile\n \n def fit(self, X, y=None):\n \"\"\" Fit scaler and return self\n \n Winsorise `X` at `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules).\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n self.fit_transform(X, y)\n return self\n \n def fit_transform(self, X, y=None):\n \"\"\" Fit scaler and transform input data\n \n Winsorise `X` at `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules).\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n self.scale = np.ones(X.shape[1])\n self.lower = np.percentile(X, self.quantile*100, axis=0)\n self.upper = np.percentile(X, (1-self.quantile)*100, axis=0)\n \n # Winsorize at `self.quantile`\n winX = X.copy()\n is_lower = (winX < self.lower)\n is_higher = (winX > self.upper)\n for col in range(X.shape[1]):\n winX[is_lower[:, col], col] = self.lower[col]\n winX[is_higher[:, col], col] = self.upper[col]\n \n num_uniq = np.unique(X[:, col]).size\n if num_uniq > 2: # Don't scale binary vars\n self.scale[col] = 0.4/(1e-12 + np.std(winX[:, col]))\n \n large_scale = np.where(self.scale > 1e3)[0]\n if large_scale.size > 0:\n warnings.warn('Scales of {} are larger than 1e3!'.format(large_scale))\n \n return winX*self.scale\n \n def transform(self, X):\n \"\"\" Transform input data\n \n Winsorise `X` at pre-fitted `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules) accorded to the already\n fitted scale factors.\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n winX = X.copy()\n is_lower = (winX <= self.lower)\n is_higher = (winX >= self.upper)\n for col in 
range(X.shape[1]):\n winX[is_lower[:, col], col] = self.lower[col]\n winX[is_higher[:, col], col] = self.upper[col]\n return winX*self.scale\n \nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Rule-Fit for binary classification\n \n Generate an ensemble of rules using XGBoost or a sklearn\n tree ensemble method, and use these (optionally with linear\n features) in a L1 (or other penalised) Logistic Regression to \n build a classifier.\n \n Attributes\n ----------\n \n LR: sklearn.linear_model.LogisticRegression\n Regularised linear regression on ensemble of rules\n \n feature_mask_: np.ndarray\n Array of non-zero feature values\n \n coef_: np.ndarray\n LogisticRegression (`LR`) co-efficients for features in `feature_mask_`\n \n intercept_: np.ndarray\n LogisticRegression (`LR`) intercept\n \n features: np.ndarray of str\n Input feature names\n \n features_: np.ndarray of str\n Output feature names of rule ensembles (and linear features if `linear_features=True`)\n \n \"\"\"\n \n def __init__(self, \n base_estimator=XGBClassifier(),\n linear_features=True,\n linear_feature_quantile=0.025,\n C=1e-1,\n penalty='l1',\n n_estimators=10,\n max_depth=5,\n rand_tree_size=False,\n sparse_output=True,\n n_jobs=1,\n random_state=None,\n verbose=0,\n warm_start=False,\n class_weight=None,\n ext_scaler=RobustScaler()):\n \"\"\"\n Parameters\n ----------\n \n base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier\n Estimator to generate rule ensemble with\n \n linear_features: bool, default: True\n If `True`: Use linear features as well as rules\n \n linear_feature_quantile: float, default: 0.025\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`).\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \n C: float, default: 0.1\n Inverse of regularization strength; must be a positive float.\n Like in support vector machines, smaller values specify stronger\n regularization.\n \n \n penalty: {'l1', 'l2'}, default: 'l1'\n Norm used in the regularisation for LogisticRegression\n \n n_estimators: int, default: 10\n Number of trees within `base_estimator`\n \n max_depth: int, optional\n Maximum tree depth of `base_estimator`\n \n rand_tree_size: bool, optional\n NOT YET IMPLEMENTED!\n If `True`, randomise `max_depth` to get rules of varying lengths.\n \n n_jobs: int, optional\n The number of CPUs to use. 
-1 means 'all CPUs'.\n \n verbose: int, optional\n Increasing verbosity with number.\n \n warm_start: int, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n \n class_weight : dict or 'balanced', default: 'balanced'\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n ext_scaler: sklearn Transformer, optional\n Scaling transformation to apply to linear features (before Friedman scaling)\n \n \"\"\"\n \n self.base_estimator = base_estimator\n self.linear_features = linear_features\n self.linear_feature_quantile = linear_feature_quantile\n self.C = C\n self.penalty = penalty\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.rand_tree_size = rand_tree_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.ext_scaler = ext_scaler\n \n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n \n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n \n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True # Serves no purpose \n \n X = check_array(X) # Validate input data\n \n X = self.ext_scaler.transform(X) # Scale and centre features\n if self.linear_features:\n X_scale = self._scaler.transform(X) # Scale linear features to give same a priori weight as rules\n return hstack([X_scale, self._one_hot_encoder.transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))\n\n def fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n # Instantiate rule ensemble generator and set parameters\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators, silent=(self.verbose>0),\n max_depth=self.max_depth, n_jobs=self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn('This base_estimator implementation 
has not been tested in a while!')\n self.base_estimator.set_params(n_estimators=self.n_estimators, verbose=self.verbose,\n max_depth=self.max_depth, n_jobs=self.n_jobs)\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn('This base_estimator implementation has not been tested in a while!')\n self.base_estimator.set_params(n_estimators=self.n_estimators, verbose=self.verbose,\n max_depth=self.max_depth, n_jobs=self.n_jobs)\n else:\n raise NotImplementedError\n \n # Name features\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = ['f'+str(i) for i in range(X.shape[1])]\n \n # Check input\n X = check_array(X)\n \n # Generate and extract rules\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError() # TODO: work out how to incrementally train XGB\n \n if self.verbose > 0:\n print('fitting trees')\n \n # For each tree: get leaf numbers and map them to [0, num leaves]\n # before one-hot encoding them\n n_values = \"auto\"\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall(r'([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n \n if self.verbose > 0:\n print('setup encoding')\n \n # Scale and centre linear features\n X = self.ext_scaler.fit_transform(X)\n \n if self.linear_features:\n # Linear features must be scaled to have same weighting as an average rule\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.fit_transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))\n \n if self.verbose > 0:\n print('encoded')\n \n # Fit sparse linear model to rules (and optionally linear features)\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty, class_weight=self.class_weight,\n warm_start=self.warm_start, solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n \n if self.verbose > 0:\n print('fitted')\n \n # Mask features with zero co-efficients\n # self.feature_mask_ = np.arange(self.LR.coef_.size)\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n \n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform\n \n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features))[self.feature_mask_]\n return self.features_\n \n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n \n return self.LR.predict(self.transform(X))\n \n def predict_proba(self, X):\n \"\"\" Output model prediction probability\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n 
Probabilistic predictions\n \"\"\"\n return self.LR.predict_proba(self.transform(X))\n \n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\leq$),\n Third indicates threshold value\n \n \"\"\" \n md = self.max_depth + 1 # upper limit of max_depth?\n rules = []\n levels = np.zeros((md, 3)) # Stores: (feature name, threshold, next node id)\n path = []\n\n # Extract feature numbers and thresholds for all nodes\n feat_thresh_l = re.findall(r'\\[f([0-9]+)<([-]?[0-9]+\\.?[0-9]*)\\]', dt)\n\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n # Separate node id and rest of line\n _id, rest = line.split(':')\n\n # Count number of tabs at start of line to get level (and then remove)\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n\n if prune > 0:\n # If we were last at a leaf, prune the path\n path = path[:-1+(level-prune)]\n # Add current node to path\n path.append(int(_id))\n\n if 'leaf' in rest:\n prune = level # Store where we are so we can prune when we backtrack\n rules.append(levels[:level, (0, 2, 1)].copy()) # Add rules\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:]) # Convert path to geq/leq operators\n else:\n # Extract (feature name, threshold, next node id)\n levels[level, :] = re.findall(r'\\[f([0-9]+)<([-]?[0-9]+\\.?[0-9]*)\\].*yes=([0-9]+)', line)[0]\n # Don't prune\n prune = -1\n\n return rules\n\n\n def __extract_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to sklearn binary-tree format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\leq$),\n Third indicates threshold value\n \n \"\"\" \n t = dt.tree_ # Get tree object\n rules = []\n\n stack = [(0, -1, -1)] # (node id, parent depth, true[<=thresh]/false[>thresh] arm)\n path = [(0, -1, -1)] # Begin path at root\n while len(stack) > 0: # While nodes to visit is not empty\n nid, pd, op = stack.pop() # Get next node id, path depth, operator\n\n if (pd > path[-1][1]): # Going deeper\n path.append((nid, pd, op))\n elif pd == -1: # ROOT\n pass\n else: # Back-track\n [path.pop() for _ in range(path[-1][1]-pd+1)]\n path.append((nid, pd, op))\n\n if t.children_left[nid] > 0: # If not leaf, add children onto stack\n stack.append((t.children_left[nid], pd + 1, 1))\n stack.append((t.children_right[nid], pd + 1, 0))\n else: # If leaf append rule\n rules.append(np.array([(t.feature[path[i][0]], path[i+1][2], t.threshold[path[i][0]]) for i in range(len(path)-1)]))\n\n return rules\n\n def __convert_rule__(self, x, labels=None, scaler=None):\n \"\"\"Convert rule represented by an array to readable format\n \n Parameters\n ----------\n \n x: numpy.ndarray\n Input array where each row represents a feature in a rule.\n 3 columns:\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\leq$),\n Third indicates threshold value\n \n labels: list of str, optional\n Names of features to replace feature numbers with\n \n scaler:\n Scaler to reverse scaling done in fitting so interpretable\n feature values can be used.\n \n Returns\n -------\n \n list of str\n List containing each stage of input rule\n \n \"\"\"\n strop = ['>', '<=']\n\n if scaler is None:\n # If no scaler, do not shift or scale\n nf = x[:, 0].astype(int).max()+1\n scale = np.ones(nf)\n center = np.zeros(nf)\n else:\n scale = scaler.scale_\n center = scaler.center_\n\n if labels is None:\n return [(str(int(f)) + str(strop[int(op)]) + str(thresh*scale[int(f)]+center[int(f)])) for f, op, thresh in x]\n else:\n return [(labels[int(f)] + str(strop[int(op)]) + str(thresh*scale[int(f)]+center[int(f)])) for f, op, thresh in x]\n \n def extract_rules(self, labels=None):\n \"\"\"Extract rules from `base_estimator`\n \n Parameters\n ----------\n \n labels: list of str, optional\n Feature names\n \n Returns\n -------\n \n numpy.ndarray\n Containing `str` representing rules in ensembles\n \n \"\"\"\n # Extract flat list of rules in array form\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_]))\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules(__dt) for dt in self.base_estimator.estimators_.ravel()]))\n elif isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for dt in self._rule_dump]))\n \n # Convert each sub-rule into text, join together with '&' and then add to rules\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=labels, scaler=self.ext_scaler)) for r in rules])\n \n return self.rules\n",
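The linear-feature scaling in the source above is compact enough to sanity-check in isolation. The following standalone sketch (the helper name fried_scale and the demo data are illustrative, not part of the source) mirrors FriedScaler.fit_transform: winsorise each column at the chosen quantiles, then rescale every non-binary column to a standard deviation of 0.4, roughly the typical spread sqrt(s(1-s)) of a rule indicator with moderate support s per the Friedman et al. 2005 reference cited in the docstring.

import numpy as np

def fried_scale(X, quantile=0.025):
    # Winsorise each column at the [quantile, 1 - quantile] percentiles.
    lower = np.percentile(X, quantile * 100, axis=0)
    upper = np.percentile(X, (1 - quantile) * 100, axis=0)
    winX = np.clip(X, lower, upper)
    # Rescale non-binary columns to std 0.4; binary columns already act as rules.
    scale = np.ones(X.shape[1])
    for col in range(X.shape[1]):
        if np.unique(X[:, col]).size > 2:
            scale[col] = 0.4 / (1e-12 + np.std(winX[:, col]))
    return winX * scale

rng = np.random.RandomState(0)
X = rng.normal(size=(1000, 3))
print(fried_scale(X).std(axis=0))  # each column comes out close to 0.4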
"import pdb\nimport warnings\nfrom collections import Counter\nimport numpy as np\nimport re\nimport itertools as it\nfrom scipy.sparse import issparse, hstack\nfrom pandas import DataFrame\nfrom sklearn.utils import check_random_state, check_array\nfrom sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin\nfrom sklearn.utils.validation import check_is_fitted, check_array, check_X_y\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom xgboost.sklearn import XGBClassifier\nfrom custom_transformers import LabelOneHotEncoder\nfrom sklearn.preprocessing import OneHotEncoder, RobustScaler\nfrom sklearn.linear_model import LogisticRegression\n\n\nclass FriedScaler(BaseEstimator, TransformerMixin):\n \"\"\"FriedScaler class: Scale linear features within rule ensemble\n \n Scales linear features within a rule ensemble\n to have the same weighting as a rule according to\n Friedman et al. 2005 Section 5.\n \n Each column, $x_i$ is winsorised at `quantile` -> $x_i'$, then \n standardised by multiplying by $0.4 \text{std}(x_i')$\n \n Attributes\n ----------\n \n scale: numpy.ndarray \n scale factor for each variable\n \n lower: numpy.ndarray\n lower winsorisation threshold\n \n upper: numpy.ndarray\n upper winsorisation threshold\n \n \"\"\"\n\n def __init__(self, quantile=0.0):\n \"\"\"\n Parameters\n ----------\n \n quantile: float\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`)\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \"\"\"\n self.quantile = quantile\n\n def fit(self, X, y=None):\n \"\"\" Fit scaler and return self\n \n Winsorise `X` at `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules).\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n self.fit_transform(X, y)\n return self\n\n def fit_transform(self, X, y=None):\n \"\"\" Fit scaler and transform input data\n \n Winsorise `X` at `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules).\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n self.scale = np.ones(X.shape[1])\n self.lower = np.percentile(X, self.quantile * 100, axis=0)\n self.upper = np.percentile(X, (1 - self.quantile) * 100, axis=0)\n winX = X.copy()\n is_lower = winX < self.lower\n is_higher = winX > self.upper\n for col in range(X.shape[1]):\n winX[is_lower[:, col], col] = self.lower[col]\n winX[is_higher[:, col], col] = self.upper[col]\n num_uniq = np.unique(X[:, col]).size\n if num_uniq > 2:\n self.scale[col] = 0.4 / (1e-12 + np.std(winX[:, col]))\n large_scale = np.where(self.scale > 1000.0)[0]\n if large_scale.size > 0:\n warnings.warn('Scales of {} are larger than 1e3!'.format(\n large_scale))\n return winX * self.scale\n\n def transform(self, X):\n \"\"\" Transform input data\n \n Winsorise `X` at pre-fitted `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules) accorded to the already\n fitted scale factors.\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n winX = X.copy()\n is_lower = winX <= self.lower\n is_higher = winX >= self.upper\n for col in range(X.shape[1]):\n winX[is_lower[:, col], col] = self.lower[col]\n 
winX[is_higher[:, col], col] = self.upper[col]\n return winX * self.scale\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Rule-Fit for binary classification\n \n Generate an ensemble of rules using XGBoost or a sklearn\n tree ensemble method, and use these (optionally with linear\n features) in a L1 (or other penalised) Logistic Regression to \n build a classifier.\n \n Attributes\n ----------\n \n LR: sklearn.linear_model.LogisticRegression\n Regularised linear regression on ensemble of rules\n \n feature_mask_: np.ndarray\n Array of non-zero feature values\n \n coef_: np.ndarray\n LogisticRegression (`LR`) co-efficients for features in `feature_mask_`\n \n intercept_: np.ndarray\n LogisticRegression (`LR`) intercept\n \n features: np.ndarray of str\n Input feature names\n \n features_: np.ndarray of str\n Output feature names of rule ensembles (and linear features if `linear_features=True`)\n \n \"\"\"\n\n def __init__(self, base_estimator=XGBClassifier(), linear_features=True,\n linear_feature_quantile=0.025, C=0.1, penalty='l1', n_estimators=10,\n max_depth=5, rand_tree_size=False, sparse_output=True, n_jobs=1,\n random_state=None, verbose=0, warm_start=False, class_weight=None,\n ext_scaler=RobustScaler()):\n \"\"\"\n Parameters\n ----------\n \n base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier\n Estimator to generate rule ensemble with\n \n linear_features: bool, default: True\n If `True`: Use linear features as well as rules\n \n linear_feature_quantile: float, default: 0.025\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`).\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \n C: float, default: 0.1\n Inverse of regularization strength; must be a positive float.\n Like in support vector machines, smaller values specify stronger\n regularization.\n \n \n penalty: {'l1', 'l2'}, default: 'l1'\n Norm used in the regularisation for LogisticRegression\n \n n_estimators: int, default: 10\n Number of trees within `base_estimator`\n \n max_depth: int, optional\n Maximum tree depth of `base_estimator`\n \n rand_tree_size: bool, optional\n NOT YET IMPLEMENTED!\n If `True`, randomise `max_depth` to get rules of varying lengths.\n \n n_jobs: int, optional\n The number of CPUs to use. 
-1 means 'all CPUs'.\n \n verbose: int, optional\n Increasing verbosity with number.\n \n warm_start: int, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n \n class_weight : dict or 'balanced', default: 'balanced'\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n ext_scaler: sklearn Transformer, optional\n Scaling transformation to apply to linear features (before Friedman scaling)\n \n \"\"\"\n self.base_estimator = base_estimator\n self.linear_features = linear_features\n self.linear_feature_quantile = linear_feature_quantile\n self.C = C\n self.penalty = penalty\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.rand_tree_size = rand_tree_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.ext_scaler = ext_scaler\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n\n def fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n silent=self.verbose > 0, max_depth=self.max_depth, n_jobs=\n self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n elif 
isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n else:\n raise NotImplementedError\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = [('f' + str(i)) for i in range(X.shape[1])]\n X = check_array(X)\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError()\n if self.verbose > 0:\n print('fitting trees')\n n_values = 'auto'\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall('([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n if self.verbose > 0:\n print('setup encoding')\n X = self.ext_scaler.fit_transform(X)\n if self.linear_features:\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.\n fit_transform(self.base_estimator.apply(X).reshape(-1, self\n .n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))\n if self.verbose > 0:\n print('encoded')\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty,\n class_weight=self.class_weight, warm_start=self.warm_start,\n solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n if self.verbose > 0:\n print('fitted')\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n\n def predict_proba(self, X):\n \"\"\" Output model prediction probability\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Probabilistic predictions\n \"\"\"\n return self.LR.predict_proba(self.transform(X))\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n\n def __extract_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to sklearn binary-tree format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n t = dt.tree_\n rules = []\n stack = [(0, -1, -1)]\n path = [(0, -1, -1)]\n while len(stack) > 0:\n nid, pd, op = stack.pop()\n if pd > path[-1][1]:\n path.append((nid, pd, op))\n elif pd == -1:\n pass\n else:\n [path.pop() for _ in range(path[-1][1] - pd + 1)]\n path.append((nid, pd, op))\n if t.children_left[nid] > 0:\n stack.append((t.children_left[nid], pd + 1, 1))\n stack.append((t.children_right[nid], pd + 1, 0))\n else:\n rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n 2], t.threshold[path[i][0]]) for i in range(len(path) -\n 1)]))\n return rules\n\n def __convert_rule__(self, x, labels=None, scaler=None):\n \"\"\"Convert rule represented by an array to readable format\n \n Parameters\n ----------\n \n x: numpy.ndarray\n Input array where each row represents a feature in a rule.\n 3 columns:\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n labels: list of str, optional\n Names of features to replace feature numbers with\n \n scaler:\n Scaler to reverse scaling done in fitting so interpretable\n feature values can be used.\n \n Returns\n -------\n \n list of str\n List containing each stage of input rule\n \n \"\"\"\n strop = ['>', '<=']\n if scaler is None:\n nf = x[:, 0].astype(int).max() + 1\n scale = np.ones(nf)\n center = np.zeros(nf)\n else:\n scale = scaler.scale_\n center = scaler.center_\n if labels is None:\n return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n [int(f)] + center[int(f)])) for f, op, thresh in x]\n else:\n return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n\n def extract_rules(self, labels=None):\n \"\"\"Extract rules from `base_estimator`\n \n Parameters\n ----------\n \n labels: list of str, optional\n Feature names\n \n Returns\n -------\n \n numpy.ndarray\n Containing `str` representing rules in ensembles\n \n \"\"\"\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n self.base_estimator.estimators_]))\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules(__dt) for dt in\n self.base_estimator.estimators_.ravel()]))\n elif 
isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for\n dt in self._rule_dump]))\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=\n labels, scaler=self.ext_scaler)) for r in rules])\n return self.rules\n",
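Both the leaf enumeration in fit_transform and the split parsing in __extract_xgb_dt_rules__ operate on the plain-text dump returned by XGBoost's Booster.get_dump(). Below is a minimal sketch of the two regexes from the source applied to a hand-written toy dump; the dump string is an assumption about that format, patterned on the regexes themselves rather than taken from a real booster.

import re

dump = '0:[f2<0.5] yes=1,no=2,missing=1\n\t1:leaf=-0.4\n\t2:leaf=0.6\n'

# Leaf node ids, as used to set up the one-hot encoding of terminal nodes.
leaves = [int(i) for i in re.findall(r'([0-9]+):leaf=', dump)]
# (feature, threshold, yes-branch id) triples, as used to rebuild split rules.
splits = re.findall(r'\[f([0-9]+)<([-]?[0-9]+\.?[0-9]*)\].*yes=([0-9]+)', dump)
print(leaves)  # [1, 2]
print(splits)  # [('2', '0.5', '1')]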
"<import token>\n\n\nclass FriedScaler(BaseEstimator, TransformerMixin):\n \"\"\"FriedScaler class: Scale linear features within rule ensemble\n \n Scales linear features within a rule ensemble\n to have the same weighting as a rule according to\n Friedman et al. 2005 Section 5.\n \n Each column, $x_i$ is winsorised at `quantile` -> $x_i'$, then \n standardised by multiplying by $0.4 \text{std}(x_i')$\n \n Attributes\n ----------\n \n scale: numpy.ndarray \n scale factor for each variable\n \n lower: numpy.ndarray\n lower winsorisation threshold\n \n upper: numpy.ndarray\n upper winsorisation threshold\n \n \"\"\"\n\n def __init__(self, quantile=0.0):\n \"\"\"\n Parameters\n ----------\n \n quantile: float\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`)\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \"\"\"\n self.quantile = quantile\n\n def fit(self, X, y=None):\n \"\"\" Fit scaler and return self\n \n Winsorise `X` at `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules).\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n self.fit_transform(X, y)\n return self\n\n def fit_transform(self, X, y=None):\n \"\"\" Fit scaler and transform input data\n \n Winsorise `X` at `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules).\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n self.scale = np.ones(X.shape[1])\n self.lower = np.percentile(X, self.quantile * 100, axis=0)\n self.upper = np.percentile(X, (1 - self.quantile) * 100, axis=0)\n winX = X.copy()\n is_lower = winX < self.lower\n is_higher = winX > self.upper\n for col in range(X.shape[1]):\n winX[is_lower[:, col], col] = self.lower[col]\n winX[is_higher[:, col], col] = self.upper[col]\n num_uniq = np.unique(X[:, col]).size\n if num_uniq > 2:\n self.scale[col] = 0.4 / (1e-12 + np.std(winX[:, col]))\n large_scale = np.where(self.scale > 1000.0)[0]\n if large_scale.size > 0:\n warnings.warn('Scales of {} are larger than 1e3!'.format(\n large_scale))\n return winX * self.scale\n\n def transform(self, X):\n \"\"\" Transform input data\n \n Winsorise `X` at pre-fitted `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules) accorded to the already\n fitted scale factors.\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n winX = X.copy()\n is_lower = winX <= self.lower\n is_higher = winX >= self.upper\n for col in range(X.shape[1]):\n winX[is_lower[:, col], col] = self.lower[col]\n winX[is_higher[:, col], col] = self.upper[col]\n return winX * self.scale\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Rule-Fit for binary classification\n \n Generate an ensemble of rules using XGBoost or a sklearn\n tree ensemble method, and use these (optionally with linear\n features) in a L1 (or other penalised) Logistic Regression to \n build a classifier.\n \n Attributes\n ----------\n \n LR: sklearn.linear_model.LogisticRegression\n Regularised linear regression on ensemble of rules\n \n feature_mask_: np.ndarray\n Array of non-zero feature values\n \n coef_: np.ndarray\n LogisticRegression (`LR`) co-efficients for features in `feature_mask_`\n \n 
intercept_: np.ndarray\n LogisticRegression (`LR`) intercept\n \n features: np.ndarray of str\n Input feature names\n \n features_: np.ndarray of str\n Output feature names of rule ensembles (and linear features if `linear_features=True`)\n \n \"\"\"\n\n def __init__(self, base_estimator=XGBClassifier(), linear_features=True,\n linear_feature_quantile=0.025, C=0.1, penalty='l1', n_estimators=10,\n max_depth=5, rand_tree_size=False, sparse_output=True, n_jobs=1,\n random_state=None, verbose=0, warm_start=False, class_weight=None,\n ext_scaler=RobustScaler()):\n \"\"\"\n Parameters\n ----------\n \n base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier\n Estimator to generate rule ensemble with\n \n linear_features: bool, default: True\n If `True`: Use linear features as well as rules\n \n linear_feature_quantile: float, default: 0.025\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`).\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \n C: float, default: 0.1\n Inverse of regularization strength; must be a positive float.\n Like in support vector machines, smaller values specify stronger\n regularization.\n \n \n penalty: {'l1', 'l2'}, default: 'l1'\n Norm used in the regularisation for LogisticRegression\n \n n_estimators: int, default: 10\n Number of trees within `base_estimator`\n \n max_depth: int, optional\n Maximum tree depth of `base_estimator`\n \n rand_tree_size: bool, optional\n NOT YET IMPLEMENTED!\n If `True`, randomise `max_depth` to get rules of varying lengths.\n \n n_jobs: int, optional\n The number of CPUs to use. -1 means 'all CPUs'.\n \n verbose: int, optional\n Increasing verbosity with number.\n \n warm_start: int, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n \n class_weight : dict or 'balanced', default: 'balanced'\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n ext_scaler: sklearn Transformer, optional\n Scaling transformation to apply to linear features (before Friedman scaling)\n \n \"\"\"\n self.base_estimator = base_estimator\n self.linear_features = linear_features\n self.linear_feature_quantile = linear_feature_quantile\n self.C = C\n self.penalty = penalty\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.rand_tree_size = rand_tree_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.ext_scaler = ext_scaler\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame 
or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n\n def fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n silent=self.verbose > 0, max_depth=self.max_depth, n_jobs=\n self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n else:\n raise NotImplementedError\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = [('f' + str(i)) for i in range(X.shape[1])]\n X = check_array(X)\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError()\n if self.verbose > 0:\n print('fitting trees')\n n_values = 'auto'\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall('([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n if self.verbose > 0:\n print('setup encoding')\n X = self.ext_scaler.fit_transform(X)\n if self.linear_features:\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.\n fit_transform(self.base_estimator.apply(X).reshape(-1, self\n .n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))\n if self.verbose > 0:\n print('encoded')\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty,\n class_weight=self.class_weight, warm_start=self.warm_start,\n solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n if self.verbose > 0:\n print('fitted')\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n self.coef_ = self.LR.coef_[0, 
self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n\n def predict_proba(self, X):\n \"\"\" Output model prediction probability\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Probabilistic predictions\n \"\"\"\n return self.LR.predict_proba(self.transform(X))\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n\n def __extract_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to sklearn binary-tree format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n t = dt.tree_\n rules = []\n stack = [(0, -1, -1)]\n path = [(0, -1, -1)]\n while len(stack) > 0:\n nid, pd, op = stack.pop()\n if pd > path[-1][1]:\n path.append((nid, pd, op))\n elif pd == -1:\n pass\n else:\n [path.pop() for _ in range(path[-1][1] - pd + 1)]\n path.append((nid, pd, op))\n if t.children_left[nid] > 0:\n stack.append((t.children_left[nid], pd + 1, 1))\n stack.append((t.children_right[nid], pd + 1, 0))\n else:\n rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n 2], t.threshold[path[i][0]]) for i in range(len(path) -\n 1)]))\n return rules\n\n def __convert_rule__(self, x, labels=None, scaler=None):\n \"\"\"Convert rule represented by an array to readable format\n \n Parameters\n ----------\n \n x: numpy.ndarray\n Input array where each row represents a feature in a rule.\n 3 columns:\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n labels: list of str, optional\n Names of features to replace feature numbers with\n \n scaler:\n Scaler to reverse scaling done in fitting so interpretable\n feature values can be used.\n \n Returns\n -------\n \n list of str\n List containing each stage of input rule\n \n \"\"\"\n strop = ['>', '<=']\n if scaler is None:\n nf = x[:, 0].astype(int).max() + 1\n scale = np.ones(nf)\n center = np.zeros(nf)\n else:\n scale = scaler.scale_\n center = scaler.center_\n if labels is None:\n return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n [int(f)] + center[int(f)])) for f, op, thresh in x]\n else:\n return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n\n def extract_rules(self, labels=None):\n \"\"\"Extract rules from `base_estimator`\n \n Parameters\n ----------\n \n labels: list of str, optional\n Feature names\n \n Returns\n -------\n \n numpy.ndarray\n Containing `str` representing rules in ensembles\n \n \"\"\"\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n self.base_estimator.estimators_]))\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules(__dt) for dt in\n self.base_estimator.estimators_.ravel()]))\n elif isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for\n dt in self._rule_dump]))\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=\n labels, scaler=self.ext_scaler)) for r in rules])\n return self.rules\n",
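Each extracted rule is an array of (feature index, operator flag, threshold) rows, which __convert_rule__ renders as readable text. A small sketch of that rendering with no un-scaling applied; the feature names are invented for the example.

import numpy as np

# Operator flag 1 means the path took the "<=" branch, 0 the ">" branch,
# matching the strop lookup used by __convert_rule__.
rule = np.array([[2.0, 1.0, 0.5], [0.0, 0.0, -1.25]])
strop = ['>', '<=']
labels = ['age', 'income', 'balance']  # hypothetical feature names
print(' & '.join(labels[int(f)] + strop[int(op)] + str(t) for f, op, t in rule))
# balance<=0.5 & age>-1.25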
"<import token>\n\n\nclass FriedScaler(BaseEstimator, TransformerMixin):\n <docstring token>\n\n def __init__(self, quantile=0.0):\n \"\"\"\n Parameters\n ----------\n \n quantile: float\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`)\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \"\"\"\n self.quantile = quantile\n\n def fit(self, X, y=None):\n \"\"\" Fit scaler and return self\n \n Winsorise `X` at `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules).\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n self.fit_transform(X, y)\n return self\n\n def fit_transform(self, X, y=None):\n \"\"\" Fit scaler and transform input data\n \n Winsorise `X` at `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules).\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n self.scale = np.ones(X.shape[1])\n self.lower = np.percentile(X, self.quantile * 100, axis=0)\n self.upper = np.percentile(X, (1 - self.quantile) * 100, axis=0)\n winX = X.copy()\n is_lower = winX < self.lower\n is_higher = winX > self.upper\n for col in range(X.shape[1]):\n winX[is_lower[:, col], col] = self.lower[col]\n winX[is_higher[:, col], col] = self.upper[col]\n num_uniq = np.unique(X[:, col]).size\n if num_uniq > 2:\n self.scale[col] = 0.4 / (1e-12 + np.std(winX[:, col]))\n large_scale = np.where(self.scale > 1000.0)[0]\n if large_scale.size > 0:\n warnings.warn('Scales of {} are larger than 1e3!'.format(\n large_scale))\n return winX * self.scale\n\n def transform(self, X):\n \"\"\" Transform input data\n \n Winsorise `X` at pre-fitted `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules) accorded to the already\n fitted scale factors.\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n winX = X.copy()\n is_lower = winX <= self.lower\n is_higher = winX >= self.upper\n for col in range(X.shape[1]):\n winX[is_lower[:, col], col] = self.lower[col]\n winX[is_higher[:, col], col] = self.upper[col]\n return winX * self.scale\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Rule-Fit for binary classification\n \n Generate an ensemble of rules using XGBoost or a sklearn\n tree ensemble method, and use these (optionally with linear\n features) in a L1 (or other penalised) Logistic Regression to \n build a classifier.\n \n Attributes\n ----------\n \n LR: sklearn.linear_model.LogisticRegression\n Regularised linear regression on ensemble of rules\n \n feature_mask_: np.ndarray\n Array of non-zero feature values\n \n coef_: np.ndarray\n LogisticRegression (`LR`) co-efficients for features in `feature_mask_`\n \n intercept_: np.ndarray\n LogisticRegression (`LR`) intercept\n \n features: np.ndarray of str\n Input feature names\n \n features_: np.ndarray of str\n Output feature names of rule ensembles (and linear features if `linear_features=True`)\n \n \"\"\"\n\n def __init__(self, base_estimator=XGBClassifier(), linear_features=True,\n linear_feature_quantile=0.025, C=0.1, penalty='l1', n_estimators=10,\n max_depth=5, rand_tree_size=False, sparse_output=True, n_jobs=1,\n random_state=None, verbose=0, warm_start=False, 
class_weight=None,\n ext_scaler=RobustScaler()):\n \"\"\"\n Parameters\n ----------\n \n base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier\n Estimator to generate rule ensemble with\n \n linear_features: bool, default: True\n If `True`: Use linear features as well as rules\n \n linear_feature_quantile: float, default: 0.025\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`).\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \n C: float, default: 0.1\n Inverse of regularization strength; must be a positive float.\n Like in support vector machines, smaller values specify stronger\n regularization.\n \n \n penalty: {'l1', 'l2'}, default: 'l1'\n Norm used in the regularisation for LogisticRegression\n \n n_estimators: int, default: 10\n Number of trees within `base_estimator`\n \n max_depth: int, optional\n Maximum tree depth of `base_estimator`\n \n rand_tree_size: bool, optional\n NOT YET IMPLEMENTED!\n If `True`, randomise `max_depth` to get rules of varying lengths.\n \n n_jobs: int, optional\n The number of CPUs to use. -1 means 'all CPUs'.\n \n verbose: int, optional\n Increasing verbosity with number.\n \n warm_start: int, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n \n class_weight : dict or 'balanced', default: 'balanced'\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n ext_scaler: sklearn Transformer, optional\n Scaling transformation to apply to linear features (before Friedman scaling)\n \n \"\"\"\n self.base_estimator = base_estimator\n self.linear_features = linear_features\n self.linear_feature_quantile = linear_feature_quantile\n self.C = C\n self.penalty = penalty\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.rand_tree_size = rand_tree_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.ext_scaler = ext_scaler\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n\n def 
fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n silent=self.verbose > 0, max_depth=self.max_depth, n_jobs=\n self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n else:\n raise NotImplementedError\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = [('f' + str(i)) for i in range(X.shape[1])]\n X = check_array(X)\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError()\n if self.verbose > 0:\n print('fitting trees')\n n_values = 'auto'\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall('([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n if self.verbose > 0:\n print('setup encoding')\n X = self.ext_scaler.fit_transform(X)\n if self.linear_features:\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.\n fit_transform(self.base_estimator.apply(X).reshape(-1, self\n .n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))\n if self.verbose > 0:\n print('encoded')\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty,\n class_weight=self.class_weight, warm_start=self.warm_start,\n solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n if self.verbose > 0:\n print('fitted')\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n 
)[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n\n def predict_proba(self, X):\n \"\"\" Output model prediction probability\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Probabilistic predictions\n \"\"\"\n return self.LR.predict_proba(self.transform(X))\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n\n def __extract_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to sklearn binary-tree format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        \"\"\"\n        t = dt.tree_\n        rules = []\n        stack = [(0, -1, -1)]\n        path = [(0, -1, -1)]\n        while len(stack) > 0:\n            nid, pd, op = stack.pop()\n            if pd > path[-1][1]:\n                path.append((nid, pd, op))\n            elif pd == -1:\n                pass\n            else:\n                [path.pop() for _ in range(path[-1][1] - pd + 1)]\n                path.append((nid, pd, op))\n            if t.children_left[nid] > 0:\n                stack.append((t.children_left[nid], pd + 1, 1))\n                stack.append((t.children_right[nid], pd + 1, 0))\n            else:\n                rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n                    2], t.threshold[path[i][0]]) for i in range(len(path) -\n                    1)]))\n        return rules\n\n    def __convert_rule__(self, x, labels=None, scaler=None):\n        \"\"\"Convert rule represented by an array to readable format\n        \n        Parameters\n        ----------\n        \n        x: numpy.ndarray\n            Input array where each row represents a feature in a rule.\n            3 columns:\n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        labels: list of str, optional\n            Names of features to replace feature numbers with\n        \n        scaler:\n            Scaler to reverse scaling done in fitting so interpretable\n            feature values can be used.\n        \n        Returns\n        -------\n        \n        list of str\n            List containing each stage of input rule\n        \n        \"\"\"\n        strop = ['>', '<=']\n        if scaler is None:\n            nf = x[:, 0].astype(int).max() + 1\n            scale = np.ones(nf)\n            center = np.zeros(nf)\n        else:\n            scale = scaler.scale_\n            center = scaler.center_\n        if labels is None:\n            return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n                [int(f)] + center[int(f)])) for f, op, thresh in x]\n        else:\n            return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n                scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n\n    def extract_rules(self, labels=None):\n        \"\"\"Extract rules from `base_estimator`\n        \n        Parameters\n        ----------\n        \n        labels: list of str, optional\n            Feature names\n        \n        Returns\n        -------\n        \n        numpy.ndarray\n            Containing `str` representing rules in ensembles\n        \n        \"\"\"\n        if isinstance(self.base_estimator, RandomForestClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_]))\n        elif isinstance(self.base_estimator, GradientBoostingClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_.ravel()]))\n        elif isinstance(self.base_estimator, XGBClassifier):\n            rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for\n                dt in self._rule_dump]))\n        self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=\n            labels, scaler=self.ext_scaler)) for r in rules])\n        return self.rules\n",
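The entry above contains the complete `RuleFitClassifier` pipeline (tree ensemble, rule extraction, then penalised logistic regression). A minimal usage sketch follows, assuming the original module's dependencies (xgboost, scikit-learn, scipy) are installed; the synthetic data and variable names are illustrative, not part of the source:

```python
# Minimal usage sketch for RuleFitClassifier as defined above.
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=500, n_features=8, random_state=0)
X = pd.DataFrame(X, columns=[f"x{i}" for i in range(8)])

clf = RuleFitClassifier(n_estimators=10, max_depth=3, C=0.1)
clf.fit(X, y)

proba = clf.predict_proba(X)[:, 1]      # P(y=1) per row
for name, w in zip(clf.features_, clf.coef_):
    print(f"{w:+.3f}  {name}")          # surviving linear terms and rules
```

The L1 penalty (`penalty='l1'`, small `C`) is what keeps `features_` short: most rule columns get a zero coefficient and are dropped by `feature_mask_`.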
"<import token>\n\n\nclass FriedScaler(BaseEstimator, TransformerMixin):\n <docstring token>\n\n def __init__(self, quantile=0.0):\n \"\"\"\n Parameters\n ----------\n \n quantile: float\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`)\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \"\"\"\n self.quantile = quantile\n\n def fit(self, X, y=None):\n \"\"\" Fit scaler and return self\n \n Winsorise `X` at `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules).\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n self.fit_transform(X, y)\n return self\n\n def fit_transform(self, X, y=None):\n \"\"\" Fit scaler and transform input data\n \n Winsorise `X` at `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules).\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n self.scale = np.ones(X.shape[1])\n self.lower = np.percentile(X, self.quantile * 100, axis=0)\n self.upper = np.percentile(X, (1 - self.quantile) * 100, axis=0)\n winX = X.copy()\n is_lower = winX < self.lower\n is_higher = winX > self.upper\n for col in range(X.shape[1]):\n winX[is_lower[:, col], col] = self.lower[col]\n winX[is_higher[:, col], col] = self.upper[col]\n num_uniq = np.unique(X[:, col]).size\n if num_uniq > 2:\n self.scale[col] = 0.4 / (1e-12 + np.std(winX[:, col]))\n large_scale = np.where(self.scale > 1000.0)[0]\n if large_scale.size > 0:\n warnings.warn('Scales of {} are larger than 1e3!'.format(\n large_scale))\n return winX * self.scale\n <function token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Rule-Fit for binary classification\n \n Generate an ensemble of rules using XGBoost or a sklearn\n tree ensemble method, and use these (optionally with linear\n features) in a L1 (or other penalised) Logistic Regression to \n build a classifier.\n \n Attributes\n ----------\n \n LR: sklearn.linear_model.LogisticRegression\n Regularised linear regression on ensemble of rules\n \n feature_mask_: np.ndarray\n Array of non-zero feature values\n \n coef_: np.ndarray\n LogisticRegression (`LR`) co-efficients for features in `feature_mask_`\n \n intercept_: np.ndarray\n LogisticRegression (`LR`) intercept\n \n features: np.ndarray of str\n Input feature names\n \n features_: np.ndarray of str\n Output feature names of rule ensembles (and linear features if `linear_features=True`)\n \n \"\"\"\n\n def __init__(self, base_estimator=XGBClassifier(), linear_features=True,\n linear_feature_quantile=0.025, C=0.1, penalty='l1', n_estimators=10,\n max_depth=5, rand_tree_size=False, sparse_output=True, n_jobs=1,\n random_state=None, verbose=0, warm_start=False, class_weight=None,\n ext_scaler=RobustScaler()):\n \"\"\"\n Parameters\n ----------\n \n base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier\n Estimator to generate rule ensemble with\n \n linear_features: bool, default: True\n If `True`: Use linear features as well as rules\n \n linear_feature_quantile: float, default: 0.025\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`).\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \n C: float, default: 0.1\n Inverse 
of regularization strength; must be a positive float.\n Like in support vector machines, smaller values specify stronger\n regularization.\n \n \n penalty: {'l1', 'l2'}, default: 'l1'\n Norm used in the regularisation for LogisticRegression\n \n n_estimators: int, default: 10\n Number of trees within `base_estimator`\n \n max_depth: int, optional\n Maximum tree depth of `base_estimator`\n \n rand_tree_size: bool, optional\n NOT YET IMPLEMENTED!\n If `True`, randomise `max_depth` to get rules of varying lengths.\n \n n_jobs: int, optional\n The number of CPUs to use. -1 means 'all CPUs'.\n \n verbose: int, optional\n Increasing verbosity with number.\n \n warm_start: int, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n \n class_weight : dict or 'balanced', default: 'balanced'\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n ext_scaler: sklearn Transformer, optional\n Scaling transformation to apply to linear features (before Friedman scaling)\n \n \"\"\"\n self.base_estimator = base_estimator\n self.linear_features = linear_features\n self.linear_feature_quantile = linear_feature_quantile\n self.C = C\n self.penalty = penalty\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.rand_tree_size = rand_tree_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.ext_scaler = ext_scaler\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n\n def fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or 
numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n silent=self.verbose > 0, max_depth=self.max_depth, n_jobs=\n self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n else:\n raise NotImplementedError\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = [('f' + str(i)) for i in range(X.shape[1])]\n X = check_array(X)\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError()\n if self.verbose > 0:\n print('fitting trees')\n n_values = 'auto'\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall('([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n if self.verbose > 0:\n print('setup encoding')\n X = self.ext_scaler.fit_transform(X)\n if self.linear_features:\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.\n fit_transform(self.base_estimator.apply(X).reshape(-1, self\n .n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))\n if self.verbose > 0:\n print('encoded')\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty,\n class_weight=self.class_weight, warm_start=self.warm_start,\n solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n if self.verbose > 0:\n print('fitted')\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n\n def predict_proba(self, X):\n \"\"\" Output model prediction probability\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Probabilistic predictions\n \"\"\"\n return self.LR.predict_proba(self.transform(X))\n\n def __extract_xgb_dt_rules__(self, 
dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n\n def __extract_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to sklearn binary-tree format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n t = dt.tree_\n rules = []\n stack = [(0, -1, -1)]\n path = [(0, -1, -1)]\n while len(stack) > 0:\n nid, pd, op = stack.pop()\n if pd > path[-1][1]:\n path.append((nid, pd, op))\n elif pd == -1:\n pass\n else:\n [path.pop() for _ in range(path[-1][1] - pd + 1)]\n path.append((nid, pd, op))\n if t.children_left[nid] > 0:\n stack.append((t.children_left[nid], pd + 1, 1))\n stack.append((t.children_right[nid], pd + 1, 0))\n else:\n rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n 2], t.threshold[path[i][0]]) for i in range(len(path) -\n 1)]))\n return rules\n\n def __convert_rule__(self, x, labels=None, scaler=None):\n \"\"\"Convert rule represented by an array to readable format\n \n Parameters\n ----------\n \n x: numpy.ndarray\n Input array where each row represents a feature in a rule.\n 3 columns:\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n labels: list of str, optional\n Names of features to replace feature numbers with\n \n scaler:\n Scaler to reverse scaling done in fitting so interpretable\n feature values can be used.\n \n Returns\n -------\n \n list of str\n List containing each stage of input rule\n \n \"\"\"\n strop = ['>', '<=']\n if scaler is None:\n nf = x[:, 0].astype(int).max() + 1\n scale = np.ones(nf)\n center = np.zeros(nf)\n else:\n scale = scaler.scale_\n center = scaler.center_\n if labels is None:\n return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n [int(f)] + center[int(f)])) for f, op, thresh in x]\n else:\n return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n\n def extract_rules(self, labels=None):\n \"\"\"Extract rules from `base_estimator`\n \n Parameters\n ----------\n \n labels: list of str, optional\n Feature names\n \n Returns\n -------\n \n numpy.ndarray\n Containing `str` representing rules in ensembles\n \n \"\"\"\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n 
self.base_estimator.estimators_]))\n        elif isinstance(self.base_estimator, GradientBoostingClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_.ravel()]))\n        elif isinstance(self.base_estimator, XGBClassifier):\n            rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for\n                dt in self._rule_dump]))\n        self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=\n            labels, scaler=self.ext_scaler)) for r in rules])\n        return self.rules\n",
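For reference, rule extraction in `__extract_xgb_dt_rules__` is driven by two regular expressions over the booster's text dump: one for split nodes, one for leaves. A self-contained sketch on a hypothetical two-leaf dump string (the dump text is made up; the patterns are the ones used in the code above):

```python
import re

# Hypothetical xgboost dump: one split node and its two leaves.
dump = "0:[f2<0.5] yes=1,no=2\n\t1:leaf=0.1\n\t2:leaf=-0.2\n"

splits = re.findall(r"\[f([0-9]+)<([-]?[0-9]+\.?[0-9]*)\].*yes=([0-9]+)", dump)
leaves = re.findall(r"([0-9]+):leaf=", dump)

print(splits)  # [('2', '0.5', '1')] -> feature 2, threshold 0.5, yes-branch node 1
print(leaves)  # ['1', '2']          -> leaf node ids, later one-hot encoded
```

Tab depth (`Counter(_id)['\t']`) is how the parser recovers each node's level, which is why the method walks the dump line by line rather than matching globally.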
"<import token>\n\n\nclass FriedScaler(BaseEstimator, TransformerMixin):\n <docstring token>\n\n def __init__(self, quantile=0.0):\n \"\"\"\n Parameters\n ----------\n \n quantile: float\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`)\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \"\"\"\n self.quantile = quantile\n <function token>\n\n def fit_transform(self, X, y=None):\n \"\"\" Fit scaler and transform input data\n \n Winsorise `X` at `quantile` and `1-quantile`.\n Scale each variable (as long as they aren't binary in\n which case they are already rules).\n \n Parameters\n ----------\n \n X: numpy.ndarray\n Co-variates\n \n y: dummy arguement, optional\n \"\"\"\n self.scale = np.ones(X.shape[1])\n self.lower = np.percentile(X, self.quantile * 100, axis=0)\n self.upper = np.percentile(X, (1 - self.quantile) * 100, axis=0)\n winX = X.copy()\n is_lower = winX < self.lower\n is_higher = winX > self.upper\n for col in range(X.shape[1]):\n winX[is_lower[:, col], col] = self.lower[col]\n winX[is_higher[:, col], col] = self.upper[col]\n num_uniq = np.unique(X[:, col]).size\n if num_uniq > 2:\n self.scale[col] = 0.4 / (1e-12 + np.std(winX[:, col]))\n large_scale = np.where(self.scale > 1000.0)[0]\n if large_scale.size > 0:\n warnings.warn('Scales of {} are larger than 1e3!'.format(\n large_scale))\n return winX * self.scale\n <function token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Rule-Fit for binary classification\n \n Generate an ensemble of rules using XGBoost or a sklearn\n tree ensemble method, and use these (optionally with linear\n features) in a L1 (or other penalised) Logistic Regression to \n build a classifier.\n \n Attributes\n ----------\n \n LR: sklearn.linear_model.LogisticRegression\n Regularised linear regression on ensemble of rules\n \n feature_mask_: np.ndarray\n Array of non-zero feature values\n \n coef_: np.ndarray\n LogisticRegression (`LR`) co-efficients for features in `feature_mask_`\n \n intercept_: np.ndarray\n LogisticRegression (`LR`) intercept\n \n features: np.ndarray of str\n Input feature names\n \n features_: np.ndarray of str\n Output feature names of rule ensembles (and linear features if `linear_features=True`)\n \n \"\"\"\n\n def __init__(self, base_estimator=XGBClassifier(), linear_features=True,\n linear_feature_quantile=0.025, C=0.1, penalty='l1', n_estimators=10,\n max_depth=5, rand_tree_size=False, sparse_output=True, n_jobs=1,\n random_state=None, verbose=0, warm_start=False, class_weight=None,\n ext_scaler=RobustScaler()):\n \"\"\"\n Parameters\n ----------\n \n base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier\n Estimator to generate rule ensemble with\n \n linear_features: bool, default: True\n If `True`: Use linear features as well as rules\n \n linear_feature_quantile: float, default: 0.025\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`).\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \n C: float, default: 0.1\n Inverse of regularization strength; must be a positive float.\n Like in support vector machines, smaller values specify stronger\n regularization.\n \n \n penalty: {'l1', 'l2'}, default: 'l1'\n Norm used in the regularisation for LogisticRegression\n \n n_estimators: int, default: 10\n Number of trees within `base_estimator`\n \n max_depth: int, 
optional\n Maximum tree depth of `base_estimator`\n \n rand_tree_size: bool, optional\n NOT YET IMPLEMENTED!\n If `True`, randomise `max_depth` to get rules of varying lengths.\n \n n_jobs: int, optional\n The number of CPUs to use. -1 means 'all CPUs'.\n \n verbose: int, optional\n Increasing verbosity with number.\n \n warm_start: int, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n \n class_weight : dict or 'balanced', default: 'balanced'\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n ext_scaler: sklearn Transformer, optional\n Scaling transformation to apply to linear features (before Friedman scaling)\n \n \"\"\"\n self.base_estimator = base_estimator\n self.linear_features = linear_features\n self.linear_feature_quantile = linear_feature_quantile\n self.C = C\n self.penalty = penalty\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.rand_tree_size = rand_tree_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.ext_scaler = ext_scaler\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n\n def fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n silent=self.verbose > 0, max_depth=self.max_depth, n_jobs=\n self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn(\n 
'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n else:\n raise NotImplementedError\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = [('f' + str(i)) for i in range(X.shape[1])]\n X = check_array(X)\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError()\n if self.verbose > 0:\n print('fitting trees')\n n_values = 'auto'\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall('([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n if self.verbose > 0:\n print('setup encoding')\n X = self.ext_scaler.fit_transform(X)\n if self.linear_features:\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.\n fit_transform(self.base_estimator.apply(X).reshape(-1, self\n .n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))\n if self.verbose > 0:\n print('encoded')\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty,\n class_weight=self.class_weight, warm_start=self.warm_start,\n solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n if self.verbose > 0:\n print('fitted')\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n\n def predict_proba(self, X):\n \"\"\" Output model prediction probability\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Probabilistic predictions\n \"\"\"\n return self.LR.predict_proba(self.transform(X))\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        \"\"\"\n        md = self.max_depth + 1\n        rules = []\n        levels = np.zeros((md, 3))\n        path = []\n        feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n            )\n        _id = 0\n        prune = -1\n        for line in dt.split('\\n')[:-1]:\n            _id, rest = line.split(':')\n            level = Counter(_id)['\\t']\n            _id = _id.lstrip()\n            if prune > 0:\n                path = path[:-1 + (level - prune)]\n            path.append(int(_id))\n            if 'leaf' in rest:\n                prune = level\n                rules.append(levels[:level, (0, 2, 1)].copy())\n                rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n            else:\n                levels[level, :] = re.findall(\n                    '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n                    line)[0]\n                prune = -1\n        return rules\n\n    def __extract_dt_rules__(self, dt):\n        \"\"\" Extract rule set from single decision tree according\n        to sklearn binary-tree format\n        \n        Parameters\n        ----------\n        \n        dt: string\n        \n        Returns\n        -------\n        \n        list of numpy.ndarray\n            Each array is of length three. \n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        \"\"\"\n        t = dt.tree_\n        rules = []\n        stack = [(0, -1, -1)]\n        path = [(0, -1, -1)]\n        while len(stack) > 0:\n            nid, pd, op = stack.pop()\n            if pd > path[-1][1]:\n                path.append((nid, pd, op))\n            elif pd == -1:\n                pass\n            else:\n                [path.pop() for _ in range(path[-1][1] - pd + 1)]\n                path.append((nid, pd, op))\n            if t.children_left[nid] > 0:\n                stack.append((t.children_left[nid], pd + 1, 1))\n                stack.append((t.children_right[nid], pd + 1, 0))\n            else:\n                rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n                    2], t.threshold[path[i][0]]) for i in range(len(path) -\n                    1)]))\n        return rules\n\n    def __convert_rule__(self, x, labels=None, scaler=None):\n        \"\"\"Convert rule represented by an array to readable format\n        \n        Parameters\n        ----------\n        \n        x: numpy.ndarray\n            Input array where each row represents a feature in a rule.\n            3 columns:\n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        labels: list of str, optional\n            Names of features to replace feature numbers with\n        \n        scaler:\n            Scaler to reverse scaling done in fitting so interpretable\n            feature values can be used.\n        \n        Returns\n        -------\n        \n        list of str\n            List containing each stage of input rule\n        \n        \"\"\"\n        strop = ['>', '<=']\n        if scaler is None:\n            nf = x[:, 0].astype(int).max() + 1\n            scale = np.ones(nf)\n            center = np.zeros(nf)\n        else:\n            scale = scaler.scale_\n            center = scaler.center_\n        if labels is None:\n            return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n                [int(f)] + center[int(f)])) for f, op, thresh in x]\n        else:\n            return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n                scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n\n    def extract_rules(self, labels=None):\n        \"\"\"Extract rules from `base_estimator`\n        \n        Parameters\n        ----------\n        \n        labels: list of str, optional\n            Feature names\n        \n        Returns\n        -------\n        \n        numpy.ndarray\n            Containing `str` representing rules in ensembles\n        \n        \"\"\"\n        if isinstance(self.base_estimator, RandomForestClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_]))\n        elif isinstance(self.base_estimator, GradientBoostingClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_.ravel()]))\n        elif 
isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for\n dt in self._rule_dump]))\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=\n labels, scaler=self.ext_scaler)) for r in rules])\n return self.rules\n",
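The `FriedScaler` referenced in `fit_transform` implements the linear-term scaling from Friedman and Popescu's RuleFit paper: winsorise each continuous column at the chosen quantiles, then scale it to standard deviation 0.4 so linear terms carry influence comparable to 0/1 rule columns (a rule with support s has standard deviation sqrt(s(1-s)), roughly 0.4 for mid-range supports). A standalone sketch of that calculation; the function name is illustrative:

```python
import numpy as np

def fried_scale(col, quantile=0.025):
    # Winsorise the tails, then rescale to std 0.4 (as FriedScaler does).
    lo, hi = np.percentile(col, [100 * quantile, 100 * (1 - quantile)])
    win = np.clip(col, lo, hi)
    return 0.4 * win / (1e-12 + np.std(win))

x = np.random.default_rng(0).normal(size=1000)
print(np.std(fried_scale(x)))  # ~0.4 by construction
```

Binary columns are left unscaled in the class (`num_uniq > 2` check), since they are already effectively rules.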
"<import token>\n\n\nclass FriedScaler(BaseEstimator, TransformerMixin):\n <docstring token>\n\n def __init__(self, quantile=0.0):\n \"\"\"\n Parameters\n ----------\n \n quantile: float\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`)\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \"\"\"\n self.quantile = quantile\n <function token>\n <function token>\n <function token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Rule-Fit for binary classification\n \n Generate an ensemble of rules using XGBoost or a sklearn\n tree ensemble method, and use these (optionally with linear\n features) in a L1 (or other penalised) Logistic Regression to \n build a classifier.\n \n Attributes\n ----------\n \n LR: sklearn.linear_model.LogisticRegression\n Regularised linear regression on ensemble of rules\n \n feature_mask_: np.ndarray\n Array of non-zero feature values\n \n coef_: np.ndarray\n LogisticRegression (`LR`) co-efficients for features in `feature_mask_`\n \n intercept_: np.ndarray\n LogisticRegression (`LR`) intercept\n \n features: np.ndarray of str\n Input feature names\n \n features_: np.ndarray of str\n Output feature names of rule ensembles (and linear features if `linear_features=True`)\n \n \"\"\"\n\n def __init__(self, base_estimator=XGBClassifier(), linear_features=True,\n linear_feature_quantile=0.025, C=0.1, penalty='l1', n_estimators=10,\n max_depth=5, rand_tree_size=False, sparse_output=True, n_jobs=1,\n random_state=None, verbose=0, warm_start=False, class_weight=None,\n ext_scaler=RobustScaler()):\n \"\"\"\n Parameters\n ----------\n \n base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier\n Estimator to generate rule ensemble with\n \n linear_features: bool, default: True\n If `True`: Use linear features as well as rules\n \n linear_feature_quantile: float, default: 0.025\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`).\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \n C: float, default: 0.1\n Inverse of regularization strength; must be a positive float.\n Like in support vector machines, smaller values specify stronger\n regularization.\n \n \n penalty: {'l1', 'l2'}, default: 'l1'\n Norm used in the regularisation for LogisticRegression\n \n n_estimators: int, default: 10\n Number of trees within `base_estimator`\n \n max_depth: int, optional\n Maximum tree depth of `base_estimator`\n \n rand_tree_size: bool, optional\n NOT YET IMPLEMENTED!\n If `True`, randomise `max_depth` to get rules of varying lengths.\n \n n_jobs: int, optional\n The number of CPUs to use. 
-1 means 'all CPUs'.\n \n verbose: int, optional\n Increasing verbosity with number.\n \n warm_start: int, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n \n class_weight : dict or 'balanced', default: 'balanced'\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n ext_scaler: sklearn Transformer, optional\n Scaling transformation to apply to linear features (before Friedman scaling)\n \n \"\"\"\n self.base_estimator = base_estimator\n self.linear_features = linear_features\n self.linear_feature_quantile = linear_feature_quantile\n self.C = C\n self.penalty = penalty\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.rand_tree_size = rand_tree_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.ext_scaler = ext_scaler\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n\n def fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n silent=self.verbose > 0, max_depth=self.max_depth, n_jobs=\n self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n elif 
isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n else:\n raise NotImplementedError\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = [('f' + str(i)) for i in range(X.shape[1])]\n X = check_array(X)\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError()\n if self.verbose > 0:\n print('fitting trees')\n n_values = 'auto'\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall('([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n if self.verbose > 0:\n print('setup encoding')\n X = self.ext_scaler.fit_transform(X)\n if self.linear_features:\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.\n fit_transform(self.base_estimator.apply(X).reshape(-1, self\n .n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))\n if self.verbose > 0:\n print('encoded')\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty,\n class_weight=self.class_weight, warm_start=self.warm_start,\n solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n if self.verbose > 0:\n print('fitted')\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n\n def predict_proba(self, X):\n \"\"\" Output model prediction probability\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Probabilistic predictions\n \"\"\"\n return self.LR.predict_proba(self.transform(X))\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        \"\"\"\n        md = self.max_depth + 1\n        rules = []\n        levels = np.zeros((md, 3))\n        path = []\n        feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n            )\n        _id = 0\n        prune = -1\n        for line in dt.split('\\n')[:-1]:\n            _id, rest = line.split(':')\n            level = Counter(_id)['\\t']\n            _id = _id.lstrip()\n            if prune > 0:\n                path = path[:-1 + (level - prune)]\n            path.append(int(_id))\n            if 'leaf' in rest:\n                prune = level\n                rules.append(levels[:level, (0, 2, 1)].copy())\n                rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n            else:\n                levels[level, :] = re.findall(\n                    '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n                    line)[0]\n                prune = -1\n        return rules\n\n    def __extract_dt_rules__(self, dt):\n        \"\"\" Extract rule set from single decision tree according\n        to sklearn binary-tree format\n        \n        Parameters\n        ----------\n        \n        dt: string\n        \n        Returns\n        -------\n        \n        list of numpy.ndarray\n            Each array is of length three. \n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        \"\"\"\n        t = dt.tree_\n        rules = []\n        stack = [(0, -1, -1)]\n        path = [(0, -1, -1)]\n        while len(stack) > 0:\n            nid, pd, op = stack.pop()\n            if pd > path[-1][1]:\n                path.append((nid, pd, op))\n            elif pd == -1:\n                pass\n            else:\n                [path.pop() for _ in range(path[-1][1] - pd + 1)]\n                path.append((nid, pd, op))\n            if t.children_left[nid] > 0:\n                stack.append((t.children_left[nid], pd + 1, 1))\n                stack.append((t.children_right[nid], pd + 1, 0))\n            else:\n                rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n                    2], t.threshold[path[i][0]]) for i in range(len(path) -\n                    1)]))\n        return rules\n\n    def __convert_rule__(self, x, labels=None, scaler=None):\n        \"\"\"Convert rule represented by an array to readable format\n        \n        Parameters\n        ----------\n        \n        x: numpy.ndarray\n            Input array where each row represents a feature in a rule.\n            3 columns:\n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        labels: list of str, optional\n            Names of features to replace feature numbers with\n        \n        scaler:\n            Scaler to reverse scaling done in fitting so interpretable\n            feature values can be used.\n        \n        Returns\n        -------\n        \n        list of str\n            List containing each stage of input rule\n        \n        \"\"\"\n        strop = ['>', '<=']\n        if scaler is None:\n            nf = x[:, 0].astype(int).max() + 1\n            scale = np.ones(nf)\n            center = np.zeros(nf)\n        else:\n            scale = scaler.scale_\n            center = scaler.center_\n        if labels is None:\n            return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n                [int(f)] + center[int(f)])) for f, op, thresh in x]\n        else:\n            return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n                scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n\n    def extract_rules(self, labels=None):\n        \"\"\"Extract rules from `base_estimator`\n        \n        Parameters\n        ----------\n        \n        labels: list of str, optional\n            Feature names\n        \n        Returns\n        -------\n        \n        numpy.ndarray\n            Containing `str` representing rules in ensembles\n        \n        \"\"\"\n        if isinstance(self.base_estimator, RandomForestClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_]))\n        elif isinstance(self.base_estimator, GradientBoostingClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_.ravel()]))\n        elif 
isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for\n dt in self._rule_dump]))\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=\n labels, scaler=self.ext_scaler)) for r in rules])\n return self.rules\n",
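`__convert_rule__` renders each extracted rule row `(feature, operator flag, threshold)` as a string, where flag 1 maps to `<=` and 0 to `>` (matching `strop = ['>', '<=']` above). A toy illustration of that encoding, independent of any fitted model:

```python
import numpy as np

# Each row: (feature index, operator flag, threshold); flag 1 -> "<=", 0 -> ">".
rule = np.array([[2, 1, 0.5],   # f2 <= 0.5
                 [0, 0, 1.3]])  # f0 > 1.3
strop = ['>', '<=']
print(' & '.join(f"f{int(f)}{strop[int(op)]}{thr}" for f, op, thr in rule))
# -> "f2<=0.5 & f0>1.3"
```

When a scaler is supplied, the class additionally inverts the fitted scaling (`thresh * scale + center`) so thresholds are reported in the original feature units.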
"<import token>\n\n\nclass FriedScaler(BaseEstimator, TransformerMixin):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Rule-Fit for binary classification\n \n Generate an ensemble of rules using XGBoost or a sklearn\n tree ensemble method, and use these (optionally with linear\n features) in a L1 (or other penalised) Logistic Regression to \n build a classifier.\n \n Attributes\n ----------\n \n LR: sklearn.linear_model.LogisticRegression\n Regularised linear regression on ensemble of rules\n \n feature_mask_: np.ndarray\n Array of non-zero feature values\n \n coef_: np.ndarray\n LogisticRegression (`LR`) co-efficients for features in `feature_mask_`\n \n intercept_: np.ndarray\n LogisticRegression (`LR`) intercept\n \n features: np.ndarray of str\n Input feature names\n \n features_: np.ndarray of str\n Output feature names of rule ensembles (and linear features if `linear_features=True`)\n \n \"\"\"\n\n def __init__(self, base_estimator=XGBClassifier(), linear_features=True,\n linear_feature_quantile=0.025, C=0.1, penalty='l1', n_estimators=10,\n max_depth=5, rand_tree_size=False, sparse_output=True, n_jobs=1,\n random_state=None, verbose=0, warm_start=False, class_weight=None,\n ext_scaler=RobustScaler()):\n \"\"\"\n Parameters\n ----------\n \n base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier\n Estimator to generate rule ensemble with\n \n linear_features: bool, default: True\n If `True`: Use linear features as well as rules\n \n linear_feature_quantile: float, default: 0.025\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`).\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \n C: float, default: 0.1\n Inverse of regularization strength; must be a positive float.\n Like in support vector machines, smaller values specify stronger\n regularization.\n \n \n penalty: {'l1', 'l2'}, default: 'l1'\n Norm used in the regularisation for LogisticRegression\n \n n_estimators: int, default: 10\n Number of trees within `base_estimator`\n \n max_depth: int, optional\n Maximum tree depth of `base_estimator`\n \n rand_tree_size: bool, optional\n NOT YET IMPLEMENTED!\n If `True`, randomise `max_depth` to get rules of varying lengths.\n \n n_jobs: int, optional\n The number of CPUs to use. 
-1 means 'all CPUs'.\n \n verbose: int, optional\n Increasing verbosity with number.\n \n warm_start: int, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n \n class_weight : dict or 'balanced', default: 'balanced'\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n ext_scaler: sklearn Transformer, optional\n Scaling transformation to apply to linear features (before Friedman scaling)\n \n \"\"\"\n self.base_estimator = base_estimator\n self.linear_features = linear_features\n self.linear_feature_quantile = linear_feature_quantile\n self.C = C\n self.penalty = penalty\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.rand_tree_size = rand_tree_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.ext_scaler = ext_scaler\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n\n def fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n silent=self.verbose > 0, max_depth=self.max_depth, n_jobs=\n self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n elif 
isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n else:\n raise NotImplementedError\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = [('f' + str(i)) for i in range(X.shape[1])]\n X = check_array(X)\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError()\n if self.verbose > 0:\n print('fitting trees')\n n_values = 'auto'\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall('([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n if self.verbose > 0:\n print('setup encoding')\n X = self.ext_scaler.fit_transform(X)\n if self.linear_features:\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.\n fit_transform(self.base_estimator.apply(X).reshape(-1, self\n .n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))\n if self.verbose > 0:\n print('encoded')\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty,\n class_weight=self.class_weight, warm_start=self.warm_start,\n solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n if self.verbose > 0:\n print('fitted')\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n\n def predict_proba(self, X):\n \"\"\" Output model prediction probability\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Probabilistic predictions\n \"\"\"\n return self.LR.predict_proba(self.transform(X))\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        \"\"\"\n        md = self.max_depth + 1\n        rules = []\n        levels = np.zeros((md, 3))\n        path = []\n        feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n            )\n        _id = 0\n        prune = -1\n        for line in dt.split('\\n')[:-1]:\n            _id, rest = line.split(':')\n            level = Counter(_id)['\\t']\n            _id = _id.lstrip()\n            if prune > 0:\n                path = path[:-1 + (level - prune)]\n            path.append(int(_id))\n            if 'leaf' in rest:\n                prune = level\n                rules.append(levels[:level, (0, 2, 1)].copy())\n                rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n            else:\n                levels[level, :] = re.findall(\n                    '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n                    line)[0]\n                prune = -1\n        return rules\n\n    def __extract_dt_rules__(self, dt):\n        \"\"\" Extract rule set from single decision tree according\n        to sklearn binary-tree format\n        \n        Parameters\n        ----------\n        \n        dt: string\n        \n        Returns\n        -------\n        \n        list of numpy.ndarray\n            Each array is of length three. \n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        \"\"\"\n        t = dt.tree_\n        rules = []\n        stack = [(0, -1, -1)]\n        path = [(0, -1, -1)]\n        while len(stack) > 0:\n            nid, pd, op = stack.pop()\n            if pd > path[-1][1]:\n                path.append((nid, pd, op))\n            elif pd == -1:\n                pass\n            else:\n                [path.pop() for _ in range(path[-1][1] - pd + 1)]\n                path.append((nid, pd, op))\n            if t.children_left[nid] > 0:\n                stack.append((t.children_left[nid], pd + 1, 1))\n                stack.append((t.children_right[nid], pd + 1, 0))\n            else:\n                rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n                    2], t.threshold[path[i][0]]) for i in range(len(path) -\n                    1)]))\n        return rules\n\n    def __convert_rule__(self, x, labels=None, scaler=None):\n        \"\"\"Convert rule represented by an array to readable format\n        \n        Parameters\n        ----------\n        \n        x: numpy.ndarray\n            Input array where each row represents a feature in a rule.\n            3 columns:\n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        labels: list of str, optional\n            Names of features to replace feature numbers with\n        \n        scaler:\n            Scaler to reverse scaling done in fitting so interpretable\n            feature values can be used.\n        \n        Returns\n        -------\n        \n        list of str\n            List containing each stage of input rule\n        \n        \"\"\"\n        strop = ['>', '<=']\n        if scaler is None:\n            nf = x[:, 0].astype(int).max() + 1\n            scale = np.ones(nf)\n            center = np.zeros(nf)\n        else:\n            scale = scaler.scale_\n            center = scaler.center_\n        if labels is None:\n            return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n                [int(f)] + center[int(f)])) for f, op, thresh in x]\n        else:\n            return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n                scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n\n    def extract_rules(self, labels=None):\n        \"\"\"Extract rules from `base_estimator`\n        \n        Parameters\n        ----------\n        \n        labels: list of str, optional\n            Feature names\n        \n        Returns\n        -------\n        \n        numpy.ndarray\n            Containing `str` representing rules in ensembles\n        \n        \"\"\"\n        if isinstance(self.base_estimator, RandomForestClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_]))\n        elif isinstance(self.base_estimator, GradientBoostingClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_.ravel()]))\n        elif 
isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for\n dt in self._rule_dump]))\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=\n labels, scaler=self.ext_scaler)) for r in rules])\n return self.rules\n",
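A minimal end-to-end usage sketch for the RuleFitClassifier recorded in the step above. It assumes the class and its helpers (FriedScaler, LabelOneHotEncoder) are importable and xgboost is installed; the data and column names are synthetic stand-ins, not part of the recorded source.

import pandas as pd
from sklearn.datasets import make_classification

# Synthetic binary problem; DataFrame column names become the rule labels.
X, y = make_classification(n_samples=500, n_features=8, random_state=0)
X = pd.DataFrame(X, columns=['x%d' % i for i in range(8)])

# A small, shallow ensemble keeps rules short; L1 then prunes most of them.
clf = RuleFitClassifier(n_estimators=20, max_depth=3, C=0.1, penalty='l1')
clf.fit(X, y)

proba = clf.predict_proba(X)[:, 1]  # probabilities from the logistic layer
for name, w in zip(clf.features_, clf.coef_):
    print('%+.3f  %s' % (w, name))  # surviving linear terms and rules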
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"Rule-Fit for binary classification\n \n Generate an ensemble of rules using XGBoost or a sklearn\n tree ensemble method, and use these (optionally with linear\n features) in a L1 (or other penalised) Logistic Regression to \n build a classifier.\n \n Attributes\n ----------\n \n LR: sklearn.linear_model.LogisticRegression\n Regularised linear regression on ensemble of rules\n \n feature_mask_: np.ndarray\n Array of non-zero feature values\n \n coef_: np.ndarray\n LogisticRegression (`LR`) co-efficients for features in `feature_mask_`\n \n intercept_: np.ndarray\n LogisticRegression (`LR`) intercept\n \n features: np.ndarray of str\n Input feature names\n \n features_: np.ndarray of str\n Output feature names of rule ensembles (and linear features if `linear_features=True`)\n \n \"\"\"\n\n def __init__(self, base_estimator=XGBClassifier(), linear_features=True,\n linear_feature_quantile=0.025, C=0.1, penalty='l1', n_estimators=10,\n max_depth=5, rand_tree_size=False, sparse_output=True, n_jobs=1,\n random_state=None, verbose=0, warm_start=False, class_weight=None,\n ext_scaler=RobustScaler()):\n \"\"\"\n Parameters\n ----------\n \n base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier\n Estimator to generate rule ensemble with\n \n linear_features: bool, default: True\n If `True`: Use linear features as well as rules\n \n linear_feature_quantile: float, default: 0.025\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`).\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \n C: float, default: 0.1\n Inverse of regularization strength; must be a positive float.\n Like in support vector machines, smaller values specify stronger\n regularization.\n \n \n penalty: {'l1', 'l2'}, default: 'l1'\n Norm used in the regularisation for LogisticRegression\n \n n_estimators: int, default: 10\n Number of trees within `base_estimator`\n \n max_depth: int, optional\n Maximum tree depth of `base_estimator`\n \n rand_tree_size: bool, optional\n NOT YET IMPLEMENTED!\n If `True`, randomise `max_depth` to get rules of varying lengths.\n \n n_jobs: int, optional\n The number of CPUs to use. 
-1 means 'all CPUs'.\n \n verbose: int, optional\n Increasing verbosity with number.\n \n warm_start: int, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n \n class_weight : dict or 'balanced', default: 'balanced'\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n ext_scaler: sklearn Transformer, optional\n Scaling transformation to apply to linear features (before Friedman scaling)\n \n \"\"\"\n self.base_estimator = base_estimator\n self.linear_features = linear_features\n self.linear_feature_quantile = linear_feature_quantile\n self.C = C\n self.penalty = penalty\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.rand_tree_size = rand_tree_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.ext_scaler = ext_scaler\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n\n def fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n silent=self.verbose > 0, max_depth=self.max_depth, n_jobs=\n self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n elif 
isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n else:\n raise NotImplementedError\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = [('f' + str(i)) for i in range(X.shape[1])]\n X = check_array(X)\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError()\n if self.verbose > 0:\n print('fitting trees')\n n_values = 'auto'\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall('([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n if self.verbose > 0:\n print('setup encoding')\n X = self.ext_scaler.fit_transform(X)\n if self.linear_features:\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.\n fit_transform(self.base_estimator.apply(X).reshape(-1, self\n .n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))\n if self.verbose > 0:\n print('encoded')\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty,\n class_weight=self.class_weight, warm_start=self.warm_start,\n solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n if self.verbose > 0:\n print('fitted')\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n\n def predict_proba(self, X):\n \"\"\" Output model prediction probability\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Probabilistic predictions\n \"\"\"\n return self.LR.predict_proba(self.transform(X))\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        \"\"\"\n        md = self.max_depth + 1\n        rules = []\n        levels = np.zeros((md, 3))\n        path = []\n        feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n            )\n        _id = 0\n        prune = -1\n        for line in dt.split('\\n')[:-1]:\n            _id, rest = line.split(':')\n            level = Counter(_id)['\\t']\n            _id = _id.lstrip()\n            if prune > 0:\n                path = path[:-1 + (level - prune)]\n            path.append(int(_id))\n            if 'leaf' in rest:\n                prune = level\n                rules.append(levels[:level, (0, 2, 1)].copy())\n                rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n            else:\n                levels[level, :] = re.findall(\n                    '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n                    line)[0]\n                prune = -1\n        return rules
\n\n    def __extract_dt_rules__(self, dt):\n        \"\"\" Extract rule set from single decision tree according\n        to sklearn binary-tree format\n        \n        Parameters\n        ----------\n        \n        dt: string\n        \n        Returns\n        -------\n        \n        list of numpy.ndarray\n            Each array is of length three. \n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        \"\"\"\n        t = dt.tree_\n        rules = []\n        stack = [(0, -1, -1)]\n        path = [(0, -1, -1)]\n        while len(stack) > 0:\n            nid, pd, op = stack.pop()\n            if pd > path[-1][1]:\n                path.append((nid, pd, op))\n            elif pd == -1:\n                pass\n            else:\n                [path.pop() for _ in range(path[-1][1] - pd + 1)]\n                path.append((nid, pd, op))\n            if t.children_left[nid] > 0:\n                stack.append((t.children_left[nid], pd + 1, 1))\n                stack.append((t.children_right[nid], pd + 1, 0))\n            else:\n                rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n                    2], t.threshold[path[i][0]]) for i in range(len(path) -\n                    1)]))\n        return rules
\n\n    def __convert_rule__(self, x, labels=None, scaler=None):\n        \"\"\"Convert rule represented by an array to readable format\n        \n        Parameters\n        ----------\n        \n        x: numpy.ndarray\n            Input array where each row represents a feature in a rule.\n            3 columns:\n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        labels: list of str, optional\n            Names of features to replace feature numbers with\n        \n        scaler:\n            Scaler to reverse scaling done in fitting so interpretable\n            feature values can be used.\n        \n        Returns\n        -------\n        \n        list of str\n            List containing each stage of input rule\n        \n        \"\"\"\n        strop = ['>', '<=']\n        if scaler is None:\n            nf = x[:, 0].astype(int).max() + 1\n            scale = np.ones(nf)\n            center = np.zeros(nf)\n        else:\n            scale = scaler.scale_\n            center = scaler.center_\n        if labels is None:\n            return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n                [int(f)] + center[int(f)])) for f, op, thresh in x]\n        else:\n            return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n                scale[int(f)] + center[int(f)])) for f, op, thresh in x]
\n\n    def extract_rules(self, labels=None):\n        \"\"\"Extract rules from `base_estimator`\n        \n        Parameters\n        ----------\n        \n        labels: list of str, optional\n            Feature names\n        \n        Returns\n        -------\n        \n        numpy.ndarray\n            Containing `str` representing rules in ensembles\n        \n        \"\"\"\n        if isinstance(self.base_estimator, RandomForestClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_]))\n        elif isinstance(self.base_estimator, GradientBoostingClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_.ravel()]))\n        elif isinstance(self.base_estimator, XGBClassifier):\n            rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for\n                dt in self._rule_dump]))\n        self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=\n            labels, scaler=self.ext_scaler)) for r in rules])\n        return self.rules\n",
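For the leaf-harvesting loop inside fit_transform above, here is a self-contained sketch of the same regex pass over an XGBoost-style text dump; the dump string is invented for illustration.

import re

# One made-up tree in xgboost text-dump format: node 0 splits, 1 and 2 are leaves.
dump = ['0:[f2<0.5] yes=1,no=2,missing=1\n\t1:leaf=-0.4\n\t2:leaf=0.6\n']

leaves_l = []
for tree_i in dump:
    # node ids that carry a 'leaf=' payload identify terminal nodes
    leaves_l.append([int(i) for i in re.findall('([0-9]+):leaf=', tree_i)])

print(leaves_l)  # [[1, 2]] -- this per-tree leaf list seeds LabelOneHotEncoder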
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n <docstring token>\n\n def __init__(self, base_estimator=XGBClassifier(), linear_features=True,\n linear_feature_quantile=0.025, C=0.1, penalty='l1', n_estimators=10,\n max_depth=5, rand_tree_size=False, sparse_output=True, n_jobs=1,\n random_state=None, verbose=0, warm_start=False, class_weight=None,\n ext_scaler=RobustScaler()):\n \"\"\"\n Parameters\n ----------\n \n base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier\n Estimator to generate rule ensemble with\n \n linear_features: bool, default: True\n If `True`: Use linear features as well as rules\n \n linear_feature_quantile: float, default: 0.025\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`).\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \n C: float, default: 0.1\n Inverse of regularization strength; must be a positive float.\n Like in support vector machines, smaller values specify stronger\n regularization.\n \n \n penalty: {'l1', 'l2'}, default: 'l1'\n Norm used in the regularisation for LogisticRegression\n \n n_estimators: int, default: 10\n Number of trees within `base_estimator`\n \n max_depth: int, optional\n Maximum tree depth of `base_estimator`\n \n rand_tree_size: bool, optional\n NOT YET IMPLEMENTED!\n If `True`, randomise `max_depth` to get rules of varying lengths.\n \n n_jobs: int, optional\n The number of CPUs to use. -1 means 'all CPUs'.\n \n verbose: int, optional\n Increasing verbosity with number.\n \n warm_start: int, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n \n class_weight : dict or 'balanced', default: 'balanced'\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n ext_scaler: sklearn Transformer, optional\n Scaling transformation to apply to linear features (before Friedman scaling)\n \n \"\"\"\n self.base_estimator = base_estimator\n self.linear_features = linear_features\n self.linear_feature_quantile = linear_feature_quantile\n self.C = C\n self.penalty = penalty\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.rand_tree_size = rand_tree_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.ext_scaler = ext_scaler\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n 
is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n\n def fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n silent=self.verbose > 0, max_depth=self.max_depth, n_jobs=\n self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n else:\n raise NotImplementedError\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = [('f' + str(i)) for i in range(X.shape[1])]\n X = check_array(X)\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError()\n if self.verbose > 0:\n print('fitting trees')\n n_values = 'auto'\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall('([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n if self.verbose > 0:\n print('setup encoding')\n X = self.ext_scaler.fit_transform(X)\n if self.linear_features:\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.\n fit_transform(self.base_estimator.apply(X).reshape(-1, self\n .n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))\n if self.verbose > 0:\n print('encoded')\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty,\n class_weight=self.class_weight, warm_start=self.warm_start,\n solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n if self.verbose > 0:\n print('fitted')\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return 
X_transform\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n\n def predict_proba(self, X):\n \"\"\" Output model prediction probability\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Probabilistic predictions\n \"\"\"\n return self.LR.predict_proba(self.transform(X))\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n\n def __extract_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to sklearn binary-tree format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        \"\"\"\n        t = dt.tree_\n        rules = []\n        stack = [(0, -1, -1)]\n        path = [(0, -1, -1)]\n        while len(stack) > 0:\n            nid, pd, op = stack.pop()\n            if pd > path[-1][1]:\n                path.append((nid, pd, op))\n            elif pd == -1:\n                pass\n            else:\n                [path.pop() for _ in range(path[-1][1] - pd + 1)]\n                path.append((nid, pd, op))\n            if t.children_left[nid] > 0:\n                stack.append((t.children_left[nid], pd + 1, 1))\n                stack.append((t.children_right[nid], pd + 1, 0))\n            else:\n                rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n                    2], t.threshold[path[i][0]]) for i in range(len(path) -\n                    1)]))\n        return rules
\n\n    def __convert_rule__(self, x, labels=None, scaler=None):\n        \"\"\"Convert rule represented by an array to readable format\n        \n        Parameters\n        ----------\n        \n        x: numpy.ndarray\n            Input array where each row represents a feature in a rule.\n            3 columns:\n            First indicates feature number,\n            Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n            Third indicates threshold value\n        \n        labels: list of str, optional\n            Names of features to replace feature numbers with\n        \n        scaler:\n            Scaler to reverse scaling done in fitting so interpretable\n            feature values can be used.\n        \n        Returns\n        -------\n        \n        list of str\n            List containing each stage of input rule\n        \n        \"\"\"\n        strop = ['>', '<=']\n        if scaler is None:\n            nf = x[:, 0].astype(int).max() + 1\n            scale = np.ones(nf)\n            center = np.zeros(nf)\n        else:\n            scale = scaler.scale_\n            center = scaler.center_\n        if labels is None:\n            return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n                [int(f)] + center[int(f)])) for f, op, thresh in x]\n        else:\n            return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n                scale[int(f)] + center[int(f)])) for f, op, thresh in x]
\n\n    def extract_rules(self, labels=None):\n        \"\"\"Extract rules from `base_estimator`\n        \n        Parameters\n        ----------\n        \n        labels: list of str, optional\n            Feature names\n        \n        Returns\n        -------\n        \n        numpy.ndarray\n            Containing `str` representing rules in ensembles\n        \n        \"\"\"\n        if isinstance(self.base_estimator, RandomForestClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_]))\n        elif isinstance(self.base_estimator, GradientBoostingClassifier):\n            rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in\n                self.base_estimator.estimators_.ravel()]))\n        elif isinstance(self.base_estimator, XGBClassifier):\n            rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for\n                dt in self._rule_dump]))\n        self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=\n            labels, scaler=self.ext_scaler)) for r in rules])\n        return self.rules\n",
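LabelOneHotEncoder is defined earlier in the recorded file and abstracted away in these steps, so its body never appears here. As a hypothetical reading of its contract (fit/transform over the (n_samples, n_trees) leaf-id matrix that base_estimator.apply returns), a minimal stand-in could look like this sketch.

import numpy as np
from scipy.sparse import hstack
from sklearn.preprocessing import OneHotEncoder

class TinyLeafEncoder:  # hypothetical stand-in, not the recorded class
    def __init__(self, leaves_l):
        # one encoder per tree, seeded with that tree's known leaf ids
        self.encoders = [OneHotEncoder(categories=[np.array(sorted(l))],
                                       handle_unknown='ignore')
                         for l in leaves_l]

    def fit_transform(self, leaf_ids):
        # leaf_ids: (n_samples, n_trees) output of base_estimator.apply(X)
        return hstack([enc.fit_transform(leaf_ids[:, [j]])
                       for j, enc in enumerate(self.encoders)])

    def transform(self, leaf_ids):
        return hstack([enc.transform(leaf_ids[:, [j]])
                       for j, enc in enumerate(self.encoders)])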
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n <docstring token>\n\n def __init__(self, base_estimator=XGBClassifier(), linear_features=True,\n linear_feature_quantile=0.025, C=0.1, penalty='l1', n_estimators=10,\n max_depth=5, rand_tree_size=False, sparse_output=True, n_jobs=1,\n random_state=None, verbose=0, warm_start=False, class_weight=None,\n ext_scaler=RobustScaler()):\n \"\"\"\n Parameters\n ----------\n \n base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier\n Estimator to generate rule ensemble with\n \n linear_features: bool, default: True\n If `True`: Use linear features as well as rules\n \n linear_feature_quantile: float, default: 0.025\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`).\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \n C: float, default: 0.1\n Inverse of regularization strength; must be a positive float.\n Like in support vector machines, smaller values specify stronger\n regularization.\n \n \n penalty: {'l1', 'l2'}, default: 'l1'\n Norm used in the regularisation for LogisticRegression\n \n n_estimators: int, default: 10\n Number of trees within `base_estimator`\n \n max_depth: int, optional\n Maximum tree depth of `base_estimator`\n \n rand_tree_size: bool, optional\n NOT YET IMPLEMENTED!\n If `True`, randomise `max_depth` to get rules of varying lengths.\n \n n_jobs: int, optional\n The number of CPUs to use. -1 means 'all CPUs'.\n \n verbose: int, optional\n Increasing verbosity with number.\n \n warm_start: int, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n \n class_weight : dict or 'balanced', default: 'balanced'\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n ext_scaler: sklearn Transformer, optional\n Scaling transformation to apply to linear features (before Friedman scaling)\n \n \"\"\"\n self.base_estimator = base_estimator\n self.linear_features = linear_features\n self.linear_feature_quantile = linear_feature_quantile\n self.C = C\n self.penalty = penalty\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.rand_tree_size = rand_tree_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.ext_scaler = ext_scaler\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n 
is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n\n def fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n silent=self.verbose > 0, max_depth=self.max_depth, n_jobs=\n self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n else:\n raise NotImplementedError\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = [('f' + str(i)) for i in range(X.shape[1])]\n X = check_array(X)\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError()\n if self.verbose > 0:\n print('fitting trees')\n n_values = 'auto'\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall('([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n if self.verbose > 0:\n print('setup encoding')\n X = self.ext_scaler.fit_transform(X)\n if self.linear_features:\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.\n fit_transform(self.base_estimator.apply(X).reshape(-1, self\n .n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))\n if self.verbose > 0:\n print('encoded')\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty,\n class_weight=self.class_weight, warm_start=self.warm_start,\n solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n if self.verbose > 0:\n print('fitted')\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return 
X_transform\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n\n def predict_proba(self, X):\n \"\"\" Output model prediction probability\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Probabilistic predictions\n \"\"\"\n return self.LR.predict_proba(self.transform(X))\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n\n def __extract_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to sklearn binary-tree format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n t = dt.tree_\n rules = []\n stack = [(0, -1, -1)]\n path = [(0, -1, -1)]\n while len(stack) > 0:\n nid, pd, op = stack.pop()\n if pd > path[-1][1]:\n path.append((nid, pd, op))\n elif pd == -1:\n pass\n else:\n [path.pop() for _ in range(path[-1][1] - pd + 1)]\n path.append((nid, pd, op))\n if t.children_left[nid] > 0:\n stack.append((t.children_left[nid], pd + 1, 1))\n stack.append((t.children_right[nid], pd + 1, 0))\n else:\n rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n 2], t.threshold[path[i][0]]) for i in range(len(path) -\n 1)]))\n return rules\n\n def __convert_rule__(self, x, labels=None, scaler=None):\n \"\"\"Convert rule represented by an array to readable format\n \n Parameters\n ----------\n \n x: numpy.ndarray\n Input array where each row represents a feature in a rule.\n 3 columns:\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n labels: list of str, optional\n Names of features to replace feature numbers with\n \n scaler:\n Scaler to reverse scaling done in fitting so interpretable\n feature values can be used.\n \n Returns\n -------\n \n list of str\n List containing each stage of input rule\n \n \"\"\"\n strop = ['>', '<=']\n if scaler is None:\n nf = x[:, 0].astype(int).max() + 1\n scale = np.ones(nf)\n center = np.zeros(nf)\n else:\n scale = scaler.scale_\n center = scaler.center_\n if labels is None:\n return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n [int(f)] + center[int(f)])) for f, op, thresh in x]\n else:\n return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n <function token>\n",
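FriedScaler is likewise only referenced in these steps. Going by Friedman and Popescu's RuleFit formulation, linear features are winsorised at the given quantile and rescaled so their spread is comparable to that of rule indicators (0.4 is the constant used in the paper). The class below is an assumption-labelled sketch, not the recorded implementation.

import numpy as np

class TinyFriedScaler:  # hypothetical stand-in for FriedScaler
    def __init__(self, quantile=0.025):
        self.quantile = quantile

    def fit_transform(self, X):
        self.lo = np.quantile(X, self.quantile, axis=0)
        self.hi = np.quantile(X, 1 - self.quantile, axis=0)
        Xw = np.clip(X, self.lo, self.hi)            # winsorise the tails
        self.scale = 0.4 / (Xw.std(axis=0) + 1e-12)  # guard constant features
        return Xw * self.scale

    def transform(self, X):
        return np.clip(X, self.lo, self.hi) * self.scale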
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n <docstring token>\n\n def __init__(self, base_estimator=XGBClassifier(), linear_features=True,\n linear_feature_quantile=0.025, C=0.1, penalty='l1', n_estimators=10,\n max_depth=5, rand_tree_size=False, sparse_output=True, n_jobs=1,\n random_state=None, verbose=0, warm_start=False, class_weight=None,\n ext_scaler=RobustScaler()):\n \"\"\"\n Parameters\n ----------\n \n base_estimator: sklearn estimator, default: xgboost.sklearn.XGBClassifier\n Estimator to generate rule ensemble with\n \n linear_features: bool, default: True\n If `True`: Use linear features as well as rules\n \n linear_feature_quantile: float, default: 0.025\n float in [0, 0.5) signifying the quantiles at which to winsorise\n (`quantile` and `1-quantile`).\n WARNING: If data has small variance then this may need to be \n very small to avoid blowing up of scale factors\n \n C: float, default: 0.1\n Inverse of regularization strength; must be a positive float.\n Like in support vector machines, smaller values specify stronger\n regularization.\n \n \n penalty: {'l1', 'l2'}, default: 'l1'\n Norm used in the regularisation for LogisticRegression\n \n n_estimators: int, default: 10\n Number of trees within `base_estimator`\n \n max_depth: int, optional\n Maximum tree depth of `base_estimator`\n \n rand_tree_size: bool, optional\n NOT YET IMPLEMENTED!\n If `True`, randomise `max_depth` to get rules of varying lengths.\n \n n_jobs: int, optional\n The number of CPUs to use. -1 means 'all CPUs'.\n \n verbose: int, optional\n Increasing verbosity with number.\n \n warm_start: int, optional\n When set to True, reuse the solution of the previous call to fit as\n initialization, otherwise, just erase the previous solution.\n \n class_weight : dict or 'balanced', default: 'balanced'\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n\n ext_scaler: sklearn Transformer, optional\n Scaling transformation to apply to linear features (before Friedman scaling)\n \n \"\"\"\n self.base_estimator = base_estimator\n self.linear_features = linear_features\n self.linear_feature_quantile = linear_feature_quantile\n self.C = C\n self.penalty = penalty\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.rand_tree_size = rand_tree_size\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.ext_scaler = ext_scaler\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n 
is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n\n def fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n silent=self.verbose > 0, max_depth=self.max_depth, n_jobs=\n self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n else:\n raise NotImplementedError\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = [('f' + str(i)) for i in range(X.shape[1])]\n X = check_array(X)\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError()\n if self.verbose > 0:\n print('fitting trees')\n n_values = 'auto'\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall('([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n if self.verbose > 0:\n print('setup encoding')\n X = self.ext_scaler.fit_transform(X)\n if self.linear_features:\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.\n fit_transform(self.base_estimator.apply(X).reshape(-1, self\n .n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))\n if self.verbose > 0:\n print('encoded')\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty,\n class_weight=self.class_weight, warm_start=self.warm_start,\n solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n if self.verbose > 0:\n print('fitted')\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return 
X_transform\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n <function token>\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n\n def __extract_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to sklearn binary-tree format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n t = dt.tree_\n rules = []\n stack = [(0, -1, -1)]\n path = [(0, -1, -1)]\n while len(stack) > 0:\n nid, pd, op = stack.pop()\n if pd > path[-1][1]:\n path.append((nid, pd, op))\n elif pd == -1:\n pass\n else:\n [path.pop() for _ in range(path[-1][1] - pd + 1)]\n path.append((nid, pd, op))\n if t.children_left[nid] > 0:\n stack.append((t.children_left[nid], pd + 1, 1))\n stack.append((t.children_right[nid], pd + 1, 0))\n else:\n rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n 2], t.threshold[path[i][0]]) for i in range(len(path) -\n 1)]))\n return rules\n\n def __convert_rule__(self, x, labels=None, scaler=None):\n \"\"\"Convert rule represented by an array to readable format\n \n Parameters\n ----------\n \n x: numpy.ndarray\n Input array where each row represents a feature in a rule.\n 3 columns:\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n labels: list of str, optional\n Names of features to replace feature numbers with\n \n scaler:\n Scaler to reverse scaling done in fitting so interpretable\n feature values can be used.\n \n Returns\n -------\n \n list of str\n List containing each stage of input rule\n \n \"\"\"\n strop = ['>', '<=']\n if scaler is None:\n nf = x[:, 0].astype(int).max() + 1\n scale = np.ones(nf)\n center = np.zeros(nf)\n else:\n scale = scaler.scale_\n center = scaler.center_\n if labels is None:\n return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n [int(f)] + center[int(f)])) for f, op, thresh in x]\n else:\n return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n <function token>\n",
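The iterative walk in __extract_dt_rules__ above can be sanity-checked against a recursive traversal. This sketch assumes a fitted sklearn DecisionTreeClassifier dt and mirrors the recorded branch-flag convention, where the left (<=) child is pushed with flag 1 and the right (>) child with flag 0, matching strop = ['>', '<='] in __convert_rule__.

def leaf_paths(t, nid=0, prefix=()):
    # t is a sklearn Tree object (dt.tree_); leaves have children_left == -1
    if t.children_left[nid] < 0:
        yield list(prefix)  # one (feature, branch_flag, threshold) per split
        return
    f, thr = t.feature[nid], t.threshold[nid]
    yield from leaf_paths(t, t.children_left[nid], prefix + ((f, 1, thr),))
    yield from leaf_paths(t, t.children_right[nid], prefix + ((f, 0, thr),))

# rules = list(leaf_paths(dt.tree_))  # comparable output to __extract_dt_rules__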
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n <docstring token>\n <function token>\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n\n def fit_transform(self, X, y, sample_weight=None):\n \"\"\" Fit and Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n Fitting process involves fitted bagged/boosted tree model to generate rules\n and then using these in a penalised logistic regression.\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n silent=self.verbose > 0, max_depth=self.max_depth, n_jobs=\n self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn(\n 'This base_estimator implementation has not been tested in a while!'\n )\n self.base_estimator.set_params(n_estimators=self.n_estimators,\n verbose=self.verbose, max_depth=self.max_depth, n_jobs=self\n .n_jobs)\n else:\n raise NotImplementedError\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = [('f' + str(i)) for i in range(X.shape[1])]\n X = check_array(X)\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError()\n if self.verbose > 0:\n print('fitting trees')\n n_values = 'auto'\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall('([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n if self.verbose > 0:\n print('setup encoding')\n X = self.ext_scaler.fit_transform(X)\n if 
self.linear_features:\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.\n fit_transform(self.base_estimator.apply(X).reshape(-1, self\n .n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))\n if self.verbose > 0:\n print('encoded')\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty,\n class_weight=self.class_weight, warm_start=self.warm_start,\n solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n if self.verbose > 0:\n print('fitted')\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n <function token>\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n\n def __extract_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to sklearn binary-tree format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n t = dt.tree_\n rules = []\n stack = [(0, -1, -1)]\n path = [(0, -1, -1)]\n while len(stack) > 0:\n nid, pd, op = stack.pop()\n if pd > path[-1][1]:\n path.append((nid, pd, op))\n elif pd == -1:\n pass\n else:\n [path.pop() for _ in range(path[-1][1] - pd + 1)]\n path.append((nid, pd, op))\n if t.children_left[nid] > 0:\n stack.append((t.children_left[nid], pd + 1, 1))\n stack.append((t.children_right[nid], pd + 1, 0))\n else:\n rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n 2], t.threshold[path[i][0]]) for i in range(len(path) -\n 1)]))\n return rules\n\n def __convert_rule__(self, x, labels=None, scaler=None):\n \"\"\"Convert rule represented by an array to readable format\n \n Parameters\n ----------\n \n x: numpy.ndarray\n Input array where each row represents a feature in a rule.\n 3 columns:\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n labels: list of str, optional\n Names of features to replace feature numbers with\n \n scaler:\n Scaler to reverse scaling done in fitting so interpretable\n feature values can be used.\n \n Returns\n -------\n \n list of str\n List containing each stage of input rule\n \n \"\"\"\n strop = ['>', '<=']\n if scaler is None:\n nf = x[:, 0].astype(int).max() + 1\n scale = np.ones(nf)\n center = np.zeros(nf)\n else:\n scale = scaler.scale_\n center = scaler.center_\n if labels is None:\n return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n [int(f)] + center[int(f)])) for f, op, thresh in x]\n else:\n return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n <function token>\n",
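A worked example of the rule rendering in __convert_rule__ above: thresholds learned on scaled inputs are mapped back through the scaler's scale_ and center_ before printing. All numbers and labels here are invented.

import numpy as np

rule = np.array([[0, 1, 0.25],    # feature 0, '<=' branch, scaled threshold
                 [2, 0, -1.50]])  # feature 2, '>' branch, scaled threshold
labels = ['age', 'bmi', 'glucose']
scale = np.array([10.0, 1.0, 2.0])
center = np.array([50.0, 0.0, 5.0])

strop = ['>', '<=']
parts = [labels[int(f)] + strop[int(op)] + str(thr * scale[int(f)] + center[int(f)])
         for f, op, thr in rule]
print(' & '.join(parts))  # age<=52.5 & glucose>2.0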
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n <docstring token>\n <function token>\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n <function token>\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n\n def predict(self, X):\n \"\"\" Output model prediction\n \n Parameters\n ----------\n \n X: pandas.DataFrame or numpy.ndarray\n \n Returns\n -------\n \n np.ndarray\n Bool predictions\n \"\"\"\n return self.LR.predict(self.transform(X))\n <function token>\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n\n def __extract_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to sklearn binary-tree format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n t = dt.tree_\n rules = []\n stack = [(0, -1, -1)]\n path = [(0, -1, -1)]\n while len(stack) > 0:\n nid, pd, op = stack.pop()\n if pd > path[-1][1]:\n path.append((nid, pd, op))\n elif pd == -1:\n pass\n else:\n [path.pop() for _ in range(path[-1][1] - pd + 1)]\n path.append((nid, pd, op))\n if t.children_left[nid] > 0:\n stack.append((t.children_left[nid], pd + 1, 1))\n stack.append((t.children_right[nid], pd + 1, 0))\n else:\n rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n 2], t.threshold[path[i][0]]) for i in range(len(path) -\n 1)]))\n return rules\n\n def __convert_rule__(self, x, labels=None, scaler=None):\n \"\"\"Convert rule represented by an array to readable format\n \n Parameters\n ----------\n \n x: numpy.ndarray\n Input array where each row represents a feature in a rule.\n 3 columns:\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n labels: list of str, optional\n Names of features to replace feature numbers with\n \n scaler:\n Scaler to reverse scaling done in fitting so interpretable\n feature values can be used.\n \n Returns\n -------\n \n list of str\n List containing each stage of input rule\n \n \"\"\"\n strop = ['>', '<=']\n if scaler is None:\n nf = x[:, 0].astype(int).max() + 1\n scale = np.ones(nf)\n center = np.zeros(nf)\n else:\n scale = scaler.scale_\n center = scaler.center_\n if labels is None:\n return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n [int(f)] + center[int(f)])) for f, op, thresh in x]\n else:\n return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n <function token>\n",
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n <docstring token>\n <function token>\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n <function token>\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n <function token>\n <function token>\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n\n def __extract_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to sklearn binary-tree format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n t = dt.tree_\n rules = []\n stack = [(0, -1, -1)]\n path = [(0, -1, -1)]\n while len(stack) > 0:\n nid, pd, op = stack.pop()\n if pd > path[-1][1]:\n path.append((nid, pd, op))\n elif pd == -1:\n pass\n else:\n [path.pop() for _ in range(path[-1][1] - pd + 1)]\n path.append((nid, pd, op))\n if t.children_left[nid] > 0:\n stack.append((t.children_left[nid], pd + 1, 1))\n stack.append((t.children_right[nid], pd + 1, 0))\n else:\n rules.append(np.array([(t.feature[path[i][0]], path[i + 1][\n 2], t.threshold[path[i][0]]) for i in range(len(path) -\n 1)]))\n return rules\n\n def __convert_rule__(self, x, labels=None, scaler=None):\n \"\"\"Convert rule represented by an array to readable format\n \n Parameters\n ----------\n \n x: numpy.ndarray\n Input array where each row represents a feature in a rule.\n 3 columns:\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n labels: list of str, optional\n Names of features to replace feature numbers with\n \n scaler:\n Scaler to reverse scaling done in fitting so interpretable\n feature values can be used.\n \n Returns\n -------\n \n list of str\n List containing each stage of input rule\n \n \"\"\"\n strop = ['>', '<=']\n if scaler is None:\n nf = x[:, 0].astype(int).max() + 1\n scale = np.ones(nf)\n center = np.zeros(nf)\n else:\n scale = scaler.scale_\n center = scaler.center_\n if labels is None:\n return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n [int(f)] + center[int(f)])) for f, op, thresh in x]\n else:\n return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n <function token>\n",
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n <docstring token>\n <function token>\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n <function token>\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n <function token>\n <function token>\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. 
\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n <function token>\n\n def __convert_rule__(self, x, labels=None, scaler=None):\n \"\"\"Convert rule represented by an array to readable format\n \n Parameters\n ----------\n \n x: numpy.ndarray\n Input array where each row represents a feature in a rule.\n 3 columns:\n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n labels: list of str, optional\n Names of features to replace feature numbers with\n \n scaler:\n Scaler to reverse scaling done in fitting so interpretable\n feature values can be used.\n \n Returns\n -------\n \n list of str\n List containing each stage of input rule\n \n \"\"\"\n strop = ['>', '<=']\n if scaler is None:\n nf = x[:, 0].astype(int).max() + 1\n scale = np.ones(nf)\n center = np.zeros(nf)\n else:\n scale = scaler.scale_\n center = scaler.center_\n if labels is None:\n return [(str(int(f)) + str(strop[int(op)]) + str(thresh * scale\n [int(f)] + center[int(f)])) for f, op, thresh in x]\n else:\n return [(labels[int(f)] + str(strop[int(op)]) + str(thresh *\n scale[int(f)] + center[int(f)])) for f, op, thresh in x]\n <function token>\n",
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n <docstring token>\n <function token>\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n\n def transform(self, X, y=None):\n \"\"\" Transform data into modified features\n (before being passed to penalised regression step).\n If `linear_features=True` then this will be scaled linear features\n followed by the one-hot-encoding signifying which rules are \"on\".\n Otherwise this is just the one-hot-encoding signifying which rules are \"on\".\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: dummy, optional\n \n Returns\n -------\n \n sparse array\n \"\"\"\n if isinstance(X, DataFrame):\n is_df = True\n X = check_array(X)\n X = self.ext_scaler.transform(X)\n if self.linear_features:\n X_scale = self._scaler.transform(X)\n return hstack([X_scale, self._one_hot_encoder.transform(self.\n base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.\n apply(X).reshape(-1, self.n_estimators))\n <function token>\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n <function token>\n <function token>\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n <docstring token>\n <function token>\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n <function token>\n <function token>\n\n def get_feature_names(self):\n \"\"\" Get names of features in the model\n \n Returns\n -------\n \n numpy.ndarray\n \"\"\"\n if self.linear_features:\n self.features_ = np.concatenate([self.features, np.array(self.\n extract_rules(labels=self.features))], 0)[self.feature_mask_]\n else:\n self.features_ = np.array(self.extract_rules(labels=self.features)\n )[self.feature_mask_]\n return self.features_\n <function token>\n <function token>\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n <docstring token>\n <function token>\n\n def fit(self, X, y, sample_weight=None):\n \"\"\" Fit model to data\n \n X: pandas.DataFrame or numpy.ndarray\n Features\n \n y: pandas.Series or numpy.ndarray\n Target\n \n Returns\n -------\n \n self\n \"\"\"\n self.fit_transform(X, y, sample_weight=sample_weight)\n return self\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __extract_xgb_dt_rules__(self, dt):\n \"\"\" Extract rule set from single decision tree according\n to `XGBClassifier` format\n \n Parameters\n ----------\n \n dt: string\n \n Returns\n -------\n \n list of numpy.ndarray\n Each array is of length three. \n First indicates feature number,\n Second indicates operator (1 if $>$ otherwise $\\\\leq$),\n Third indicates threshold value\n \n \"\"\"\n md = self.max_depth + 1\n rules = []\n levels = np.zeros((md, 3))\n path = []\n feat_thresh_l = re.findall('\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\]', dt\n )\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n _id, rest = line.split(':')\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n if prune > 0:\n path = path[:-1 + (level - prune)]\n path.append(int(_id))\n if 'leaf' in rest:\n prune = level\n rules.append(levels[:level, (0, 2, 1)].copy())\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])\n else:\n levels[level, :] = re.findall(\n '\\\\[f([0-9]+)<([-]?[0-9]+\\\\.?[0-9]*)\\\\].*yes=([0-9]+)',\n line)[0]\n prune = -1\n return rules\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n\n\nclass RuleFitClassifier(BaseEstimator, ClassifierMixin):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
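
The `__convert_rule__` method in the entry above renders (feature, operator, threshold) rows as readable strings. A minimal standalone sketch of that idea, following the documented convention that a flag of 1 means '>' (the function name and example rule are illustrative, not from the original package):

import numpy as np

def convert_rule(rule, labels=None):
    # Each row is (feature index, operator flag, threshold);
    # per the docstring convention, flag 1 means '>' and 0 means '<='.
    ops = ['<=', '>']
    out = []
    for f, op, thresh in rule:
        name = labels[int(f)] if labels is not None else str(int(f))
        out.append(name + ops[int(op)] + str(thresh))
    return out

rule = np.array([[0, 1, 2.5], [3, 0, -0.1]])
print(convert_rule(rule, labels=['age', 'bmi', 'bp', 's1']))
# -> ['age>2.5', 's1<=-0.1']
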
99,561 |
46aebea7b07226530e7f54835e27258b805144c1
|
# Create a program that prints all the integers between 3 and 13 (3 and 13 included) multiplied by two.
for x in range(3, 14):
print("x is now:",x)
print("x multiplied by 2:",x*2)
|
[
"#Create a program that prints all the integers between 3 and 13 (3 and 13 included) multiplied by two.\n\nfor x in range(3, 14):\n print(\"x is now:\",x)\n print(\"x multiplied by 2:\",x*2)\n",
"for x in range(3, 14):\n print('x is now:', x)\n print('x multiplied by 2:', x * 2)\n",
"<code token>\n"
] | false |
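
range(3, 14) is the right call here because Python's range excludes its stop value; a quick check of the endpoints (illustrative):

# range(start, stop) stops one before `stop`, so 14 yields 3..13 inclusive.
print(list(range(3, 14)))             # [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
print([x * 2 for x in range(3, 14)])  # the same doubling as a comprehension
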
99,562 |
d398e63ebeae2e337631e6fbdf12957f366fae72
|
# SPDX-License-Identifier: BSD-2-Clause
"""osdk-manager osdk update tests.
Manage osdk and opm binary installation, and help to scaffold, release, and
version Operator SDK-based Kubernetes operators.
This test set validates that an update correctly installs and validates the
latest version of the operator-sdk binaries, but can also be used to pin a
version.
"""
import os
from osdk_manager.util import get_logger
import osdk_manager.osdk.update as osdk_update
osdk_update._called_from_test = True
def test_update(tmp_path):
"""Test updates with both latest version and a pinned version."""
_ = get_logger(verbosity=4)
    # "latest" first, then the same pinned version twice -- the repeat
    # presumably exercises the already-installed (no-op) path.
    for osdk_version in ["latest", "1.3.1", "1.3.1"]:
version = osdk_update.osdk_update(version=osdk_version, **tmp_path)
file_data = osdk_update.OsdkFileData(version=version, **tmp_path)
assert file_data.files_not_matching() == []
for filename in file_data.downloads:
try:
os.remove(file_data.downloads[filename]['dst'])
except Exception:
pass
|
[
"# SPDX-License-Identifier: BSD-2-Clause\n\"\"\"osdk-manager osdk update tests.\n\nManage osdk and opm binary installation, and help to scaffold, release, and\nversion Operator SDK-based Kubernetes operators.\n\nThis test set validates that an update correctly installs and validates the\nlatest version of the operator-sdk binaries, but can also be used to pin a\nversion.\n\"\"\"\n\nimport os\n\nfrom osdk_manager.util import get_logger\nimport osdk_manager.osdk.update as osdk_update\nosdk_update._called_from_test = True\n\n\ndef test_update(tmp_path):\n \"\"\"Test updates with both latest version and a pinned version.\"\"\"\n _ = get_logger(verbosity=4)\n for osdk_version in [\"latest\", \"1.3.1\", \"1.3.1\"]:\n version = osdk_update.osdk_update(version=osdk_version, **tmp_path)\n file_data = osdk_update.OsdkFileData(version=version, **tmp_path)\n assert file_data.files_not_matching() == []\n for filename in file_data.downloads:\n try:\n os.remove(file_data.downloads[filename]['dst'])\n except Exception:\n pass\n",
"<docstring token>\nimport os\nfrom osdk_manager.util import get_logger\nimport osdk_manager.osdk.update as osdk_update\nosdk_update._called_from_test = True\n\n\ndef test_update(tmp_path):\n \"\"\"Test updates with both latest version and a pinned version.\"\"\"\n _ = get_logger(verbosity=4)\n for osdk_version in ['latest', '1.3.1', '1.3.1']:\n version = osdk_update.osdk_update(version=osdk_version, **tmp_path)\n file_data = osdk_update.OsdkFileData(version=version, **tmp_path)\n assert file_data.files_not_matching() == []\n for filename in file_data.downloads:\n try:\n os.remove(file_data.downloads[filename]['dst'])\n except Exception:\n pass\n",
"<docstring token>\n<import token>\nosdk_update._called_from_test = True\n\n\ndef test_update(tmp_path):\n \"\"\"Test updates with both latest version and a pinned version.\"\"\"\n _ = get_logger(verbosity=4)\n for osdk_version in ['latest', '1.3.1', '1.3.1']:\n version = osdk_update.osdk_update(version=osdk_version, **tmp_path)\n file_data = osdk_update.OsdkFileData(version=version, **tmp_path)\n assert file_data.files_not_matching() == []\n for filename in file_data.downloads:\n try:\n os.remove(file_data.downloads[filename]['dst'])\n except Exception:\n pass\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef test_update(tmp_path):\n \"\"\"Test updates with both latest version and a pinned version.\"\"\"\n _ = get_logger(verbosity=4)\n for osdk_version in ['latest', '1.3.1', '1.3.1']:\n version = osdk_update.osdk_update(version=osdk_version, **tmp_path)\n file_data = osdk_update.OsdkFileData(version=version, **tmp_path)\n assert file_data.files_not_matching() == []\n for filename in file_data.downloads:\n try:\n os.remove(file_data.downloads[filename]['dst'])\n except Exception:\n pass\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n"
] | false |
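
The test above covers both a moving target ("latest") and a fixed pin, repeating the pin to hit the already-installed path. The same pattern against a toy resolver (resolve_version is a hypothetical stand-in, not part of osdk-manager):

def resolve_version(requested, available=("1.3.0", "1.3.1")):
    # "latest" maps to the newest available release; anything else is a pin.
    return available[-1] if requested == "latest" else requested

def test_resolve_version():
    assert resolve_version("latest") == "1.3.1"
    # Re-pinning the same version should be a no-op, like the repeated "1.3.1".
    assert resolve_version("1.3.1") == "1.3.1"
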
99,563 |
63f69591e1e8c4675c7eaf42d6373cceb59fb758
|
import os
from flask import Flask
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_jwt import JWT
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
flask_app = Flask(__name__)
flask_app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + BASE_DIR + '/bucket_list.db'
flask_app.config['JWT_SECRET_KEY'] = 'test123'
db = SQLAlchemy(flask_app)
from . import views
api = Api(flask_app)
api.add_resource(views.UserRegistration, "/v1/auth/register")
api.add_resource(views.BucketList, "/v1/bucketlists")
api.add_resource(views.SingleBucketList, "/v1/bucketlists/<int:id>")
api.add_resource(views.Items, "/v1/bucketlists/<int:id>/items")
api.add_resource(views.ItemsUpdate, "/v1/bucketlists/<int:id>/items/<int:item_id>")
jwt = JWT(flask_app, views.authenticate, views.identity)
|
[
"import os\nfrom flask import Flask\nfrom flask_restful import Api\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_jwt import JWT\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nflask_app = Flask(__name__)\nflask_app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + BASE_DIR + '/bucket_list.db'\nflask_app.config['JWT_SECRET_KEY'] = 'test123'\n\n\ndb = SQLAlchemy(flask_app)\n\nfrom . import views\n\n\napi = Api(flask_app)\napi.add_resource(views.UserRegistration, \"/v1/auth/register\")\napi.add_resource(views.BucketList, \"/v1/bucketlists\")\napi.add_resource(views.SingleBucketList, \"/v1/bucketlists/<int:id>\")\napi.add_resource(views.Items, \"/v1/bucketlists/<int:id>/items\")\napi.add_resource(views.ItemsUpdate, \"/v1/bucketlists/<int:id>/items/<int:item_id>\")\njwt = JWT(flask_app, views.authenticate, views.identity)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"import os\nfrom flask import Flask\nfrom flask_restful import Api\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_jwt import JWT\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nflask_app = Flask(__name__)\nflask_app.config['SQLALCHEMY_DATABASE_URI'\n ] = 'sqlite:///' + BASE_DIR + '/bucket_list.db'\nflask_app.config['JWT_SECRET_KEY'] = 'test123'\ndb = SQLAlchemy(flask_app)\nfrom . import views\napi = Api(flask_app)\napi.add_resource(views.UserRegistration, '/v1/auth/register')\napi.add_resource(views.BucketList, '/v1/bucketlists')\napi.add_resource(views.SingleBucketList, '/v1/bucketlists/<int:id>')\napi.add_resource(views.Items, '/v1/bucketlists/<int:id>/items')\napi.add_resource(views.ItemsUpdate,\n '/v1/bucketlists/<int:id>/items/<int:item_id>')\njwt = JWT(flask_app, views.authenticate, views.identity)\n",
"<import token>\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nflask_app = Flask(__name__)\nflask_app.config['SQLALCHEMY_DATABASE_URI'\n ] = 'sqlite:///' + BASE_DIR + '/bucket_list.db'\nflask_app.config['JWT_SECRET_KEY'] = 'test123'\ndb = SQLAlchemy(flask_app)\n<import token>\napi = Api(flask_app)\napi.add_resource(views.UserRegistration, '/v1/auth/register')\napi.add_resource(views.BucketList, '/v1/bucketlists')\napi.add_resource(views.SingleBucketList, '/v1/bucketlists/<int:id>')\napi.add_resource(views.Items, '/v1/bucketlists/<int:id>/items')\napi.add_resource(views.ItemsUpdate,\n '/v1/bucketlists/<int:id>/items/<int:item_id>')\njwt = JWT(flask_app, views.authenticate, views.identity)\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\napi.add_resource(views.UserRegistration, '/v1/auth/register')\napi.add_resource(views.BucketList, '/v1/bucketlists')\napi.add_resource(views.SingleBucketList, '/v1/bucketlists/<int:id>')\napi.add_resource(views.Items, '/v1/bucketlists/<int:id>/items')\napi.add_resource(views.ItemsUpdate,\n '/v1/bucketlists/<int:id>/items/<int:item_id>')\n<assignment token>\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n"
] | false |
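
Each add_resource call above binds a URL pattern to a Resource class from views, with converters such as <int:id> arriving as keyword arguments. A self-contained sketch of that wiring (the EchoItem resource is illustrative, not part of the app):

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class EchoItem(Resource):
    # flask_restful dispatches GET on the bound route to this method.
    def get(self, id):
        return {"id": id}

api.add_resource(EchoItem, "/v1/echo/<int:id>")
# e.g. app.test_client().get("/v1/echo/7").get_json() == {"id": 7}
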
99,564 |
ba15623cce2580eba159e63127917006174b7379
|
states = {"California": "CA", "Arizona": "AZ", "Arkansas": "AK"}
for state in states:
print("State: " + state + " Abbreviation: " + states[state])
store_prices = {"Cereal": 2.00, "Bread": 4.00, "fiber optic": 25.00, "lambo": 30.00 }
print(store_prices["Cereal"] + store_prices["lambo"])
store_inventory = {"Cereal" : 20, "Bread": 30, "fiber optic": 40, "lambo": 2}
price = str(2 * store_prices["Cereal"] + store_prices["lambo"])
print("The price of two boxes of cereal and one lambo is: " + price)
store_inventory["Cereal"] -= 2
store_inventory["lambo"] -= 1
print(store_inventory["Cereal"])
print(store_inventory["lambo"])
for item in store_prices:
store_prices[item] *= 1.03
for item in store_prices:
print(store_prices[item])
|
[
"\n\nstates = {\"California\": \"CA\", \"Arizona\": \"AZ\", \"Arkansas\": \"AK\"}\n\nfor state in states:\n print(\"State: \" + state + \" Abbreviation: \" + states[state])\n\n\nstore_prices = {\"Cereal\": 2.00, \"Bread\": 4.00, \"fiber optic\": 25.00, \"lambo\": 30.00 }\n\nprint(store_prices[\"Cereal\"] + store_prices[\"lambo\"])\n\n\nstore_inventory = {\"Cereal\" : 20, \"Bread\": 30, \"fiber optic\": 40, \"lambo\": 2}\n\n\nprice = str(2 * store_prices[\"Cereal\"] + store_prices[\"lambo\"])\nprint(\"The price of two boxes of cereal and one lambo is: \" + price)\n\nstore_inventory[\"Cereal\"] -= 2\nstore_inventory[\"lambo\"] -= 1\n\nprint(store_inventory[\"Cereal\"])\nprint(store_inventory[\"lambo\"])\n\nfor item in store_prices:\n store_prices[item] *= 1.03\n\nfor item in store_prices:\n print(store_prices[item])\n",
"states = {'California': 'CA', 'Arizona': 'AZ', 'Arkansas': 'AK'}\nfor state in states:\n print('State: ' + state + ' Abbreviation: ' + states[state])\nstore_prices = {'Cereal': 2.0, 'Bread': 4.0, 'fiber optic': 25.0, 'lambo': 30.0\n }\nprint(store_prices['Cereal'] + store_prices['lambo'])\nstore_inventory = {'Cereal': 20, 'Bread': 30, 'fiber optic': 40, 'lambo': 2}\nprice = str(2 * store_prices['Cereal'] + store_prices['lambo'])\nprint('The price of two boxes of cereal and one lambo is: ' + price)\nstore_inventory['Cereal'] -= 2\nstore_inventory['lambo'] -= 1\nprint(store_inventory['Cereal'])\nprint(store_inventory['lambo'])\nfor item in store_prices:\n store_prices[item] *= 1.03\nfor item in store_prices:\n print(store_prices[item])\n",
"<assignment token>\nfor state in states:\n print('State: ' + state + ' Abbreviation: ' + states[state])\n<assignment token>\nprint(store_prices['Cereal'] + store_prices['lambo'])\n<assignment token>\nprint('The price of two boxes of cereal and one lambo is: ' + price)\nstore_inventory['Cereal'] -= 2\nstore_inventory['lambo'] -= 1\nprint(store_inventory['Cereal'])\nprint(store_inventory['lambo'])\nfor item in store_prices:\n store_prices[item] *= 1.03\nfor item in store_prices:\n print(store_prices[item])\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
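
The 3% mark-up loop above mutates store_prices value by value; the same update can be expressed as a single dict comprehension (an equivalent rewrite for comparison, not a change to the program):

store_prices = {"Cereal": 2.00, "Bread": 4.00, "fiber optic": 25.00, "lambo": 30.00}
# Build a fresh dict with every price scaled by 1.03.
store_prices = {item: price * 1.03 for item, price in store_prices.items()}
print(store_prices["Cereal"])  # -> 2.06 (up to float rounding)
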
99,565 |
39906082ee13cc41aa297719096161bada97bc20
|
import numpy as np
from data_prep import features, targets, features_test, targets_test
def sigmoid(x):
"""
Calculate sigmoid
"""
return 1 / (1 + np.exp(-x))
# TODO: We haven't provided the sigmoid_prime function like we did in
# the previous lesson to encourage you to come up with a more
# efficient solution. If you need a hint, check out the comments
# in solution.py from the previous lecture.
# Computed over the raw feature matrix to satisfy the TODO above; it is not
# used below, since the derivative is folded directly into the error terms.
sigmoid_prime = sigmoid(features) * (1 - sigmoid(features))
# Use to same seed to make debugging easier
np.random.seed(42)
n_records, n_features = features.shape
last_loss = None
# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5
n_hidden = 2
# Initialize weights
weights_input_hidden = np.random.normal(scale=1 / n_features**.5,
                                        size=(n_features, n_hidden))
weights_hidden_output = np.random.normal(scale=1 / n_features**.5, size=n_hidden)
for e in range(epochs):
del_w_input_hidden = np.zeros(weights_input_hidden.shape)
del_w_hidden_output = np.zeros(weights_hidden_output.shape)
for x, y in zip(features.values, targets):
## Forward Pass
# TODO: Calculate the output
        hidden_input = np.dot(x, weights_input_hidden)
        hidden_output = sigmoid(hidden_input)
        output = sigmoid(np.dot(hidden_output, weights_hidden_output))
        ## Backward pass ##
        # TODO: Calculate the network's prediction error
        error = y - output
        # TODO: Calculate the error term
        error_term = error * output * (1 - output)
        ## propagate errors to hidden layer
        # TODO: Calculate the error term for the hidden layer
        hidden_error_term = (np.dot(weights_hidden_output, error_term) *
                             hidden_output * (1 - hidden_output))
        # TODO: Update the change in weights
        del_w_hidden_output += error_term * hidden_output
        del_w_input_hidden += hidden_error_term * x[:, None]
# TODO: Update weights using the learning rate and the average change in weights
weights_input_hidden += learnrate * del_w_input_hidden / n_records
weights_hidden_output += learnrate * del_w_hidden_output / n_records
# Printing out the mean square error on the training set
if e % (epochs / 10) == 0:
        # NB: `x` here is the final sample from the loop above, so the
        # reported loss is computed on one record; `features` would give
        # the full training-set MSE.
        hidden_output = sigmoid(np.dot(x, weights_input_hidden))
out = sigmoid(np.dot(hidden_output,
weights_hidden_output))
loss = np.mean((out - targets) ** 2)
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
# Calculate accuracy on test data
hidden = sigmoid(np.dot(features_test, weights_input_hidden))
out = sigmoid(np.dot(hidden, weights_hidden_output))
predictions = out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
"""
Train loss: 0.22938960279764808
Train loss: 0.22202199088539817
Train loss: 0.22030177987898966
Train loss: 0.2197781809601994
Train loss: 0.21963941580650892
Train loss: 0.21965655830275446 WARNING - Loss Increasing
Train loss: 0.21978971086762186 WARNING - Loss Increasing
Train loss: 0.22006161406657188 WARNING - Loss Increasing
Train loss: 0.22050957432577717 WARNING - Loss Increasing
Train loss: 0.22116278036079903 WARNING - Loss Increasing
Prediction accuracy: 0.725
"""
|
[
"import numpy as np\nfrom data_prep import features, targets, features_test, targets_test\n\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1 / (1 + np.exp(-x))\n\n# TODO: We haven't provided the sigmoid_prime function like we did in\n# the previous lesson to encourage you to come up with a more\n# efficient solution. If you need a hint, check out the comments\n# in solution.py from the previous lecture.\n\nsigmoid_prime = sigmoid(features) * (1 - sigmoid(features))\n\n# Use to same seed to make debugging easier\nnp.random.seed(42)\n\nn_records, n_features = features.shape\nlast_loss = None\n\n# Neural Network hyperparameters\nepochs = 1000\nlearnrate = 0.5\nn_hidden = 2\n\n# Initialize weights\nweights_input_hidden = np.random.normal(scale=1 / n_features**.5, size=( n_features, n_hidden) )\nweights_hidden_output = np.random.normal(scale=1 / n_features**.5, size= n_hidden )\n\n\nfor e in range(epochs):\n del_w_input_hidden = np.zeros(weights_input_hidden.shape)\n del_w_hidden_output = np.zeros(weights_hidden_output.shape)\n\n for x, y in zip(features.values, targets):\n \n ## Forward Pass\n # TODO: Calculate the output\n hidden_input = np.dot(x , weights_input_hidden )\n hidden_output = sigmoid( hidden_input )\n output = sigmoid(np.dot(hidden_output, weights_hidden_output))\n\n ## Backward pass ##\n # TODO: Calculate the network's prediction error\n error = y - output\n\n # TODO: Calculate the error term\n error_term = error * output * (1-output)\n\n ## propagate errors to hidden layer\n # TODO: Calculate the error term for the hidden layer\n hidden_error_term = np.dot(weights_hidden_output , error_term ) * \\\n hidden_output * ( 1 - hidden_output )\n\n # TODO: Update the change in weights\n del_w_hidden_output += error_term*hidden_output\n del_w_input_hidden += hidden_error_term*x[:,None]\n # TODO: Update weights using the learning rate and the average change in weights\n weights_input_hidden += learnrate * del_w_input_hidden / n_records\n weights_hidden_output += learnrate * del_w_hidden_output / n_records\n # Printing out the mean square error on the training set\n if e % (epochs / 10) == 0:\n hidden_output = sigmoid(np.dot(x, weights_input_hidden))\n out = sigmoid(np.dot(hidden_output,\n weights_hidden_output))\n loss = np.mean((out - targets) ** 2)\n\n if last_loss and last_loss < loss:\n print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n else:\n print(\"Train loss: \", loss)\n last_loss = loss\n\n# Calculate accuracy on test data\nhidden = sigmoid(np.dot(features_test, weights_input_hidden))\nout = sigmoid(np.dot(hidden, weights_hidden_output))\npredictions = out > 0.5\naccuracy = np.mean(predictions == targets_test)\nprint(\"Prediction accuracy: {:.3f}\".format(accuracy))\n\n\n\"\"\"\nTrain loss: 0.22938960279764808\nTrain loss: 0.22202199088539817\nTrain loss: 0.22030177987898966\nTrain loss: 0.2197781809601994\nTrain loss: 0.21963941580650892\nTrain loss: 0.21965655830275446 WARNING - Loss Increasing\nTrain loss: 0.21978971086762186 WARNING - Loss Increasing\nTrain loss: 0.22006161406657188 WARNING - Loss Increasing\nTrain loss: 0.22050957432577717 WARNING - Loss Increasing\nTrain loss: 0.22116278036079903 WARNING - Loss Increasing\nPrediction accuracy: 0.725\n\"\"\"\n",
"import numpy as np\nfrom data_prep import features, targets, features_test, targets_test\n\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1 / (1 + np.exp(-x))\n\n\nsigmoid_prime = sigmoid(features) * (1 - sigmoid(features))\nnp.random.seed(42)\nn_records, n_features = features.shape\nlast_loss = None\nepochs = 1000\nlearnrate = 0.5\nn_hidden = 2\nweights_input_hidden = np.random.normal(scale=1 / n_features ** 0.5, size=(\n n_features, n_hidden))\nweights_hidden_output = np.random.normal(scale=1 / n_features ** 0.5, size=\n n_hidden)\nfor e in range(epochs):\n del_w_input_hidden = np.zeros(weights_input_hidden.shape)\n del_w_hidden_output = np.zeros(weights_hidden_output.shape)\n for x, y in zip(features.values, targets):\n hidden_input = np.dot(x, weights_input_hidden)\n hidden_output = sigmoid(hidden_input)\n output = sigmoid(np.dot(hidden_output, weights_hidden_output))\n error = y - output\n error_term = error * output * (1 - output)\n hidden_error_term = np.dot(weights_hidden_output, error_term\n ) * hidden_output * (1 - hidden_output)\n del_w_hidden_output += error_term * hidden_output\n del_w_input_hidden += hidden_error_term * x[:, None]\n weights_input_hidden += learnrate * del_w_input_hidden / n_records\n weights_hidden_output += learnrate * del_w_hidden_output / n_records\n if e % (epochs / 10) == 0:\n hidden_output = sigmoid(np.dot(x, weights_input_hidden))\n out = sigmoid(np.dot(hidden_output, weights_hidden_output))\n loss = np.mean((out - targets) ** 2)\n if last_loss and last_loss < loss:\n print('Train loss: ', loss, ' WARNING - Loss Increasing')\n else:\n print('Train loss: ', loss)\n last_loss = loss\nhidden = sigmoid(np.dot(features_test, weights_input_hidden))\nout = sigmoid(np.dot(hidden, weights_hidden_output))\npredictions = out > 0.5\naccuracy = np.mean(predictions == targets_test)\nprint('Prediction accuracy: {:.3f}'.format(accuracy))\n<docstring token>\n",
"<import token>\n\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1 / (1 + np.exp(-x))\n\n\nsigmoid_prime = sigmoid(features) * (1 - sigmoid(features))\nnp.random.seed(42)\nn_records, n_features = features.shape\nlast_loss = None\nepochs = 1000\nlearnrate = 0.5\nn_hidden = 2\nweights_input_hidden = np.random.normal(scale=1 / n_features ** 0.5, size=(\n n_features, n_hidden))\nweights_hidden_output = np.random.normal(scale=1 / n_features ** 0.5, size=\n n_hidden)\nfor e in range(epochs):\n del_w_input_hidden = np.zeros(weights_input_hidden.shape)\n del_w_hidden_output = np.zeros(weights_hidden_output.shape)\n for x, y in zip(features.values, targets):\n hidden_input = np.dot(x, weights_input_hidden)\n hidden_output = sigmoid(hidden_input)\n output = sigmoid(np.dot(hidden_output, weights_hidden_output))\n error = y - output\n error_term = error * output * (1 - output)\n hidden_error_term = np.dot(weights_hidden_output, error_term\n ) * hidden_output * (1 - hidden_output)\n del_w_hidden_output += error_term * hidden_output\n del_w_input_hidden += hidden_error_term * x[:, None]\n weights_input_hidden += learnrate * del_w_input_hidden / n_records\n weights_hidden_output += learnrate * del_w_hidden_output / n_records\n if e % (epochs / 10) == 0:\n hidden_output = sigmoid(np.dot(x, weights_input_hidden))\n out = sigmoid(np.dot(hidden_output, weights_hidden_output))\n loss = np.mean((out - targets) ** 2)\n if last_loss and last_loss < loss:\n print('Train loss: ', loss, ' WARNING - Loss Increasing')\n else:\n print('Train loss: ', loss)\n last_loss = loss\nhidden = sigmoid(np.dot(features_test, weights_input_hidden))\nout = sigmoid(np.dot(hidden, weights_hidden_output))\npredictions = out > 0.5\naccuracy = np.mean(predictions == targets_test)\nprint('Prediction accuracy: {:.3f}'.format(accuracy))\n<docstring token>\n",
"<import token>\n\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1 / (1 + np.exp(-x))\n\n\n<assignment token>\nnp.random.seed(42)\n<assignment token>\nfor e in range(epochs):\n del_w_input_hidden = np.zeros(weights_input_hidden.shape)\n del_w_hidden_output = np.zeros(weights_hidden_output.shape)\n for x, y in zip(features.values, targets):\n hidden_input = np.dot(x, weights_input_hidden)\n hidden_output = sigmoid(hidden_input)\n output = sigmoid(np.dot(hidden_output, weights_hidden_output))\n error = y - output\n error_term = error * output * (1 - output)\n hidden_error_term = np.dot(weights_hidden_output, error_term\n ) * hidden_output * (1 - hidden_output)\n del_w_hidden_output += error_term * hidden_output\n del_w_input_hidden += hidden_error_term * x[:, None]\n weights_input_hidden += learnrate * del_w_input_hidden / n_records\n weights_hidden_output += learnrate * del_w_hidden_output / n_records\n if e % (epochs / 10) == 0:\n hidden_output = sigmoid(np.dot(x, weights_input_hidden))\n out = sigmoid(np.dot(hidden_output, weights_hidden_output))\n loss = np.mean((out - targets) ** 2)\n if last_loss and last_loss < loss:\n print('Train loss: ', loss, ' WARNING - Loss Increasing')\n else:\n print('Train loss: ', loss)\n last_loss = loss\n<assignment token>\nprint('Prediction accuracy: {:.3f}'.format(accuracy))\n<docstring token>\n",
"<import token>\n\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1 / (1 + np.exp(-x))\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n",
"<import token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n"
] | false |
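
The error-term lines in the entry above never call a separate sigmoid_prime: since output = sigmoid(h), the derivative with respect to h is output * (1 - output), so it is folded in directly. A quick numerical check of that shortcut (standalone, illustrative):

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

h = np.array([-1.0, 0.0, 2.0])
out = sigmoid(h)
shortcut = out * (1 - out)             # derivative via the activation value
eps = 1e-6                             # central finite difference for comparison
numeric = (sigmoid(h + eps) - sigmoid(h - eps)) / (2 * eps)
print(np.allclose(shortcut, numeric))  # True
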
99,566 |
970aa15862862cfb8da071303a21c13c0c1a4eb7
|
from plone.app.testing import PloneWithPackageLayer
from plone.app.testing import IntegrationTesting
from plone.app.testing import FunctionalTesting
import collective.js.togetherjs
COLLECTIVE_JS_TOGETHERJS = PloneWithPackageLayer(
zcml_package=collective.js.togetherjs,
zcml_filename='testing.zcml',
gs_profile_id='collective.js.togetherjs:testing',
name="COLLECTIVE_JS_TOGETHERJS")
COLLECTIVE_JS_TOGETHERJS_INTEGRATION = IntegrationTesting(
bases=(COLLECTIVE_JS_TOGETHERJS, ),
name="COLLECTIVE_JS_TOGETHERJS_INTEGRATION")
COLLECTIVE_JS_TOGETHERJS_FUNCTIONAL = FunctionalTesting(
bases=(COLLECTIVE_JS_TOGETHERJS, ),
name="COLLECTIVE_JS_TOGETHERJS_FUNCTIONAL")
|
[
"from plone.app.testing import PloneWithPackageLayer\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import FunctionalTesting\n\nimport collective.js.togetherjs\n\n\nCOLLECTIVE_JS_TOGETHERJS = PloneWithPackageLayer(\n zcml_package=collective.js.togetherjs,\n zcml_filename='testing.zcml',\n gs_profile_id='collective.js.togetherjs:testing',\n name=\"COLLECTIVE_JS_TOGETHERJS\")\n\nCOLLECTIVE_JS_TOGETHERJS_INTEGRATION = IntegrationTesting(\n bases=(COLLECTIVE_JS_TOGETHERJS, ),\n name=\"COLLECTIVE_JS_TOGETHERJS_INTEGRATION\")\n\nCOLLECTIVE_JS_TOGETHERJS_FUNCTIONAL = FunctionalTesting(\n bases=(COLLECTIVE_JS_TOGETHERJS, ),\n name=\"COLLECTIVE_JS_TOGETHERJS_FUNCTIONAL\")\n",
"from plone.app.testing import PloneWithPackageLayer\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import FunctionalTesting\nimport collective.js.togetherjs\nCOLLECTIVE_JS_TOGETHERJS = PloneWithPackageLayer(zcml_package=collective.js\n .togetherjs, zcml_filename='testing.zcml', gs_profile_id=\n 'collective.js.togetherjs:testing', name='COLLECTIVE_JS_TOGETHERJS')\nCOLLECTIVE_JS_TOGETHERJS_INTEGRATION = IntegrationTesting(bases=(\n COLLECTIVE_JS_TOGETHERJS,), name='COLLECTIVE_JS_TOGETHERJS_INTEGRATION')\nCOLLECTIVE_JS_TOGETHERJS_FUNCTIONAL = FunctionalTesting(bases=(\n COLLECTIVE_JS_TOGETHERJS,), name='COLLECTIVE_JS_TOGETHERJS_FUNCTIONAL')\n",
"<import token>\nCOLLECTIVE_JS_TOGETHERJS = PloneWithPackageLayer(zcml_package=collective.js\n .togetherjs, zcml_filename='testing.zcml', gs_profile_id=\n 'collective.js.togetherjs:testing', name='COLLECTIVE_JS_TOGETHERJS')\nCOLLECTIVE_JS_TOGETHERJS_INTEGRATION = IntegrationTesting(bases=(\n COLLECTIVE_JS_TOGETHERJS,), name='COLLECTIVE_JS_TOGETHERJS_INTEGRATION')\nCOLLECTIVE_JS_TOGETHERJS_FUNCTIONAL = FunctionalTesting(bases=(\n COLLECTIVE_JS_TOGETHERJS,), name='COLLECTIVE_JS_TOGETHERJS_FUNCTIONAL')\n",
"<import token>\n<assignment token>\n"
] | false |
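
The layers above are consumed by assigning them to a test case's layer attribute, the standard plone.app.testing pattern; the runner then sets the fixture up once and shares it across tests. A sketch of that usage (the import path assumes the module above is the package's testing module):

import unittest
from collective.js.togetherjs.testing import COLLECTIVE_JS_TOGETHERJS_INTEGRATION

class TestSetup(unittest.TestCase):
    layer = COLLECTIVE_JS_TOGETHERJS_INTEGRATION

    def setUp(self):
        # Layer resources (portal, app, request) are exposed dict-style.
        self.portal = self.layer['portal']

    def test_portal_exists(self):
        self.assertIsNotNone(self.portal)
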
99,567 |
4d79c95206b0eb63155c87b3f9345066a66c07dc
|
import sys
import os
from os.path import join as opjoin
import json
import gc
import numpy as np
from random import randint
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from tqdm import tqdm
import matplotlib.pyplot as plt
from dataset import ExperienceDataset
from model import ReversiNet
from reversi_env import ReversiEnv
from rollout_factory import RolloutFactory
from utils import *
class Trainer():
    """Collects self-play rollouts from ReversiEnv and trains ReversiNet with
    combined policy/value losses."""
def __init__(self, config):
with open(config, 'r') as f:
config = json.load(f)
self.epochs = config['train']['epochs']
self.policy_epochs = config['train']['policy_epochs']
self.test_iters = config['test']['iters']
layers = config['model']['layers']
conv_size = config['model']['conv_size']
logheat = config['model']['logheat']
self.net = ReversiNet(hidden_size=conv_size, layers=layers, logheat=logheat)
env_samples = config['train']['env_samples']
self.factory = RolloutFactory(self.net, env_samples)
self.value_loss = nn.MSELoss()
epsilon = config['train']['epsilon']
self.ppo_low_bnd = 1 - epsilon
self.ppo_up_bnd = 1 + epsilon
lr = config['train']['lr']
weight_decay = config['train']['weight_decay']
self.optim = optim.Adam(self.net.parameters(), lr=lr, weight_decay=weight_decay)
self.plosses = []
self.vlosses = []
self.avg_wins = []
self.stand_time = []
if torch.cuda.is_available():
torch.cuda.set_device(1)
self.net.cuda()
self.device = torch.device("cuda")
print("Using GPU")
else:
self.device = torch.device("cpu")
print("No GPU detected")
self.write_interval = config['model']['write_interval']
self.train_info_path = config['model']['trainer_save_path']
self.policy_path = config['model']['policy_save_path'].split('.pt')[0]
self.graph_path = config['model']['graph_save_path'].split('.png')[0]
def train(self, itr=0):
acc = self.test()
for i in range(self.epochs):
avg_policy_loss = 0
avg_val_loss = 0
rollouts = self.factory.get_rollouts()
# Update the policy
experience_dataset = ExperienceDataset(rollouts)
data_loader = DataLoader(experience_dataset,
batch_size=256,
shuffle=True,
pin_memory=True)
self.net.train()
for _ in range(self.policy_epochs):
avg_policy_loss = 0
avg_val_loss = 0
for state, aprob, value in data_loader:
state = _prepare_tensor_batch(state, self.device).unsqueeze(1)
aprob = _prepare_tensor_batch(aprob, self.device)
value = _prepare_tensor_batch(value, self.device).unsqueeze(1)
# Calculate the ratio term
pdist, pval = self.net(state)
policy_loss = loss_pi(aprob, pdist)
val_loss = loss_v(value, pval)
# For logging
avg_val_loss += val_loss.item()
avg_policy_loss += policy_loss.item()
# Backpropagate
self.optim.zero_grad()
loss = policy_loss + val_loss
loss.backward()
self.optim.step()
# Log info
avg_val_loss /= len(data_loader)
avg_val_loss /= self.policy_epochs
avg_policy_loss /= len(data_loader)
avg_policy_loss /= self.policy_epochs
self.vlosses.append(avg_val_loss)
self.plosses.append(avg_policy_loss)
if (itr+i) % self.write_interval == 0:
acc = self.test()
self.avg_wins.append(acc)
                print(
                    'itr: % i, avg wins: % 6.2f, value loss: % 6.2f, policy loss: % 6.2f'
                    % ((itr+i), acc, avg_val_loss, avg_policy_loss))
self.write_out(itr+i)
def test(self):
self.net.eval()
env = ReversiEnv()
rounds = env.length()//2
tot_rew = 0
tot_wins = 0
runs = self.test_iters
for _ in range(runs):
state, turn = env.reset()
actions = env.action_space()
done = False
for i in range(rounds):
in_state = torch.FloatTensor(state).unsqueeze(0).unsqueeze(0).to(self.device)
probs, _ = self.net(in_state)
probs = probs.squeeze().cpu().detach().numpy()
action = sample(probs, actions)
state, turn, reward, done = env.step(action)
actions = env.action_space()
# print('end p1')
if done:
break
probs = np.ones(actions.shape[0])
action = sample(probs, actions)
state, turn, reward, done = env.step(action)
actions = env.action_space()
# print('end p2')
if done:
break
# print(reward)
tot_rew += reward
if reward > 0:
tot_wins += 1
# elif reward == 0:
# tot_wins += 1
tot_rew /= runs
# print('Avg reward over {} runs: {}'.format(runs, tot_rew))
# print('Wins: {}/{}: {}'.format(tot_wins, runs, tot_wins/runs))
return tot_wins/runs
def read_in(self, itr=None):
train_info = {}
train_info = torch.load(self.train_info_path)
if itr is None:
itr = train_info['iter']
self.plosses = train_info['plosses']
self.vlosses = train_info['vlosses']
self.avg_wins = train_info['avg_wins']
self.optim = train_info['optimizer']
self.net.load_state_dict(torch.load(
            str(self.policy_path + '_' + str(itr) + '.pt')))
print('loaded: ' + str(self.policy_path + '_' + str(itr) + '.pt'))
self.epochs += itr
return itr
def write_out(self, itr):
train_info = {}
train_info['iter'] = itr
train_info['plosses'] = self.plosses
train_info['vlosses'] = self.vlosses
train_info['avg_wins'] = self.avg_wins
train_info['optimizer'] = self.optim
        torch.save(train_info, self.train_info_path)
        torch.save(self.net.state_dict(),
                   str(self.policy_path + '_' + str(itr) + '.pt'))
if itr > 2:
plt.plot(self.vlosses, label='value loss')
plt.plot(self.plosses, label='policy loss')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('loss')
plt.savefig(str(self.graph_path + '_loss.png'))
plt.clf()
plt.plot(self.avg_wins, label='avg wins')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('rewards')
plt.savefig(str(self.graph_path + '_wins.png'))
plt.clf()
def run(self, cont=False):
# check to see if we should continue from an existing checkpoint
# otherwise start from scratch
if cont:
itr = self.read_in()
print('continuing')
self.train(itr)
else:
self.train()
def main():
if len(sys.argv) < 2:
print('Usage: ' + sys.argv[0] + ' config')
exit(0)
cont = False
if len(sys.argv) > 2:
info = sys.argv[2]
if info == 'cont':
cont = True
config = sys.argv[1]
trainer = Trainer(config)
trainer.run(cont=cont)
if __name__ == '__main__':
main()
|
[
"import sys\nimport os\nfrom os.path import join as opjoin\nimport json\nimport gc\nimport numpy as np\nfrom random import randint\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import save_image\n\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nfrom dataset import ExperienceDataset\nfrom model import ReversiNet\nfrom reversi_env import ReversiEnv\nfrom rollout_factory import RolloutFactory\nfrom utils import *\n\n\nclass Trainer():\n \"\"\"docstring for Trainer.\"\"\"\n def __init__(self, config):\n with open(config, 'r') as f:\n config = json.load(f)\n\n self.epochs = config['train']['epochs']\n self.policy_epochs = config['train']['policy_epochs']\n self.test_iters = config['test']['iters']\n\n layers = config['model']['layers']\n conv_size = config['model']['conv_size']\n logheat = config['model']['logheat']\n self.net = ReversiNet(hidden_size=conv_size, layers=layers, logheat=logheat)\n\n env_samples = config['train']['env_samples']\n self.factory = RolloutFactory(self.net, env_samples)\n\n self.value_loss = nn.MSELoss()\n\n epsilon = config['train']['epsilon']\n self.ppo_low_bnd = 1 - epsilon\n self.ppo_up_bnd = 1 + epsilon\n\n lr = config['train']['lr']\n weight_decay = config['train']['weight_decay']\n self.optim = optim.Adam(self.net.parameters(), lr=lr, weight_decay=weight_decay)\n\n self.plosses = []\n self.vlosses = []\n self.avg_wins = []\n self.stand_time = []\n\n if torch.cuda.is_available():\n torch.cuda.set_device(1)\n self.net.cuda()\n self.device = torch.device(\"cuda\")\n print(\"Using GPU\")\n else:\n self.device = torch.device(\"cpu\")\n print(\"No GPU detected\")\n\n self.write_interval = config['model']['write_interval']\n self.train_info_path = config['model']['trainer_save_path']\n self.policy_path = config['model']['policy_save_path'].split('.pt')[0]\n self.graph_path = config['model']['graph_save_path'].split('.png')[0]\n\n\n def train(self, itr=0):\n acc = self.test()\n for i in range(self.epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n\n rollouts = self.factory.get_rollouts()\n\n # Update the policy\n experience_dataset = ExperienceDataset(rollouts)\n data_loader = DataLoader(experience_dataset,\n batch_size=256,\n shuffle=True,\n pin_memory=True)\n self.net.train()\n for _ in range(self.policy_epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n for state, aprob, value in data_loader:\n state = _prepare_tensor_batch(state, self.device).unsqueeze(1)\n aprob = _prepare_tensor_batch(aprob, self.device)\n value = _prepare_tensor_batch(value, self.device).unsqueeze(1)\n\n # Calculate the ratio term\n pdist, pval = self.net(state)\n policy_loss = loss_pi(aprob, pdist)\n val_loss = loss_v(value, pval)\n\n # For logging\n avg_val_loss += val_loss.item()\n avg_policy_loss += policy_loss.item()\n\n # Backpropagate\n self.optim.zero_grad()\n loss = policy_loss + val_loss\n loss.backward()\n self.optim.step()\n\n # Log info\n avg_val_loss /= len(data_loader)\n avg_val_loss /= self.policy_epochs\n avg_policy_loss /= len(data_loader)\n avg_policy_loss /= self.policy_epochs\n self.vlosses.append(avg_val_loss)\n self.plosses.append(avg_policy_loss)\n\n if (itr+i) % self.write_interval == 0:\n acc = self.test()\n self.avg_wins.append(acc)\n print(\n'itr: % i, avg wins: % 6.2f, value loss: % 6.2f, policy loss: % 6.2f' \\\n% ((itr+i), acc, avg_val_loss, avg_policy_loss) )\n self.write_out(itr+i)\n\n\n def test(self):\n self.net.eval()\n env = ReversiEnv()\n rounds = 
env.length()//2\n tot_rew = 0\n tot_wins = 0\n runs = self.test_iters\n\n for _ in range(runs):\n state, turn = env.reset()\n actions = env.action_space()\n done = False\n for i in range(rounds):\n in_state = torch.FloatTensor(state).unsqueeze(0).unsqueeze(0).to(self.device)\n probs, _ = self.net(in_state)\n probs = probs.squeeze().cpu().detach().numpy()\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n # print('end p1')\n if done:\n break\n\n probs = np.ones(actions.shape[0])\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n # print('end p2')\n if done:\n break\n\n # print(reward)\n tot_rew += reward\n if reward > 0:\n tot_wins += 1\n # elif reward == 0:\n # tot_wins += 1\n tot_rew /= runs\n # print('Avg reward over {} runs: {}'.format(runs, tot_rew))\n # print('Wins: {}/{}: {}'.format(tot_wins, runs, tot_wins/runs))\n return tot_wins/runs\n\n def read_in(self, itr=None):\n train_info = {}\n train_info = torch.load(self.train_info_path)\n if itr is None:\n itr = train_info['iter']\n self.plosses = train_info['plosses']\n self.vlosses = train_info['vlosses']\n self.avg_wins = train_info['avg_wins']\n self.optim = train_info['optimizer']\n\n self.net.load_state_dict(torch.load(\n str(self.policy_path + '_' + str(itr) + '.pt') ))\n print('loaded: ' + str(self.policy_path + '_' + str(itr) + '.pt'))\n\n self.epochs += itr\n return itr\n\n def write_out(self, itr):\n train_info = {}\n train_info['iter'] = itr\n train_info['plosses'] = self.plosses\n train_info['vlosses'] = self.vlosses\n train_info['avg_wins'] = self.avg_wins\n train_info['optimizer'] = self.optim\n torch.save( train_info, self.train_info_path )\n\n torch.save( self.net.state_dict(),\n str(self.policy_path + '_' + str(itr) + '.pt') )\n\n if itr > 2:\n plt.plot(self.vlosses, label='value loss')\n plt.plot(self.plosses, label='policy loss')\n plt.legend()\n plt.xlabel('epochs')\n plt.ylabel('loss')\n plt.savefig(str(self.graph_path + '_loss.png'))\n plt.clf()\n\n plt.plot(self.avg_wins, label='avg wins')\n plt.legend()\n plt.xlabel('epochs')\n plt.ylabel('rewards')\n plt.savefig(str(self.graph_path + '_wins.png'))\n plt.clf()\n\n\n def run(self, cont=False):\n # check to see if we should continue from an existing checkpoint\n # otherwise start from scratch\n if cont:\n itr = self.read_in()\n print('continuing')\n self.train(itr)\n else:\n self.train()\n\ndef main():\n if len(sys.argv) < 2:\n print('Usage: ' + sys.argv[0] + ' config')\n exit(0)\n\n cont = False\n if len(sys.argv) > 2:\n info = sys.argv[2]\n if info == 'cont':\n cont = True\n\n config = sys.argv[1]\n trainer = Trainer(config)\n trainer.run(cont=cont)\n\nif __name__ == '__main__':\n main()\n",
"import sys\nimport os\nfrom os.path import join as opjoin\nimport json\nimport gc\nimport numpy as np\nfrom random import randint\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import save_image\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom dataset import ExperienceDataset\nfrom model import ReversiNet\nfrom reversi_env import ReversiEnv\nfrom rollout_factory import RolloutFactory\nfrom utils import *\n\n\nclass Trainer:\n \"\"\"docstring for Trainer.\"\"\"\n\n def __init__(self, config):\n with open(config, 'r') as f:\n config = json.load(f)\n self.epochs = config['train']['epochs']\n self.policy_epochs = config['train']['policy_epochs']\n self.test_iters = config['test']['iters']\n layers = config['model']['layers']\n conv_size = config['model']['conv_size']\n logheat = config['model']['logheat']\n self.net = ReversiNet(hidden_size=conv_size, layers=layers, logheat\n =logheat)\n env_samples = config['train']['env_samples']\n self.factory = RolloutFactory(self.net, env_samples)\n self.value_loss = nn.MSELoss()\n epsilon = config['train']['epsilon']\n self.ppo_low_bnd = 1 - epsilon\n self.ppo_up_bnd = 1 + epsilon\n lr = config['train']['lr']\n weight_decay = config['train']['weight_decay']\n self.optim = optim.Adam(self.net.parameters(), lr=lr, weight_decay=\n weight_decay)\n self.plosses = []\n self.vlosses = []\n self.avg_wins = []\n self.stand_time = []\n if torch.cuda.is_available():\n torch.cuda.set_device(1)\n self.net.cuda()\n self.device = torch.device('cuda')\n print('Using GPU')\n else:\n self.device = torch.device('cpu')\n print('No GPU detected')\n self.write_interval = config['model']['write_interval']\n self.train_info_path = config['model']['trainer_save_path']\n self.policy_path = config['model']['policy_save_path'].split('.pt')[0]\n self.graph_path = config['model']['graph_save_path'].split('.png')[0]\n\n def train(self, itr=0):\n acc = self.test()\n for i in range(self.epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n rollouts = self.factory.get_rollouts()\n experience_dataset = ExperienceDataset(rollouts)\n data_loader = DataLoader(experience_dataset, batch_size=256,\n shuffle=True, pin_memory=True)\n self.net.train()\n for _ in range(self.policy_epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n for state, aprob, value in data_loader:\n state = _prepare_tensor_batch(state, self.device\n ).unsqueeze(1)\n aprob = _prepare_tensor_batch(aprob, self.device)\n value = _prepare_tensor_batch(value, self.device\n ).unsqueeze(1)\n pdist, pval = self.net(state)\n policy_loss = loss_pi(aprob, pdist)\n val_loss = loss_v(value, pval)\n avg_val_loss += val_loss.item()\n avg_policy_loss += policy_loss.item()\n self.optim.zero_grad()\n loss = policy_loss + val_loss\n loss.backward()\n self.optim.step()\n avg_val_loss /= len(data_loader)\n avg_val_loss /= self.policy_epochs\n avg_policy_loss /= len(data_loader)\n avg_policy_loss /= self.policy_epochs\n self.vlosses.append(avg_val_loss)\n self.plosses.append(avg_policy_loss)\n if (itr + i) % self.write_interval == 0:\n acc = self.test()\n self.avg_wins.append(acc)\n print(\n 'itr: % i, avg wins: % 6.2f, value loss: % 6.2f, policy loss: % 6.2f'\n % (itr + i, acc, avg_val_loss, avg_policy_loss))\n self.write_out(itr + i)\n\n def test(self):\n self.net.eval()\n env = ReversiEnv()\n rounds = env.length() // 2\n tot_rew = 0\n tot_wins = 0\n runs = self.test_iters\n for _ in range(runs):\n state, turn = env.reset()\n actions = 
env.action_space()\n done = False\n for i in range(rounds):\n in_state = torch.FloatTensor(state).unsqueeze(0).unsqueeze(0\n ).to(self.device)\n probs, _ = self.net(in_state)\n probs = probs.squeeze().cpu().detach().numpy()\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n if done:\n break\n probs = np.ones(actions.shape[0])\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n if done:\n break\n tot_rew += reward\n if reward > 0:\n tot_wins += 1\n tot_rew /= runs\n return tot_wins / runs\n\n def read_in(self, itr=None):\n train_info = {}\n train_info = torch.load(self.train_info_path)\n if itr is None:\n itr = train_info['iter']\n self.plosses = train_info['plosses']\n self.vlosses = train_info['vlosses']\n self.avg_wins = train_info['avg_wins']\n self.optim = train_info['optimizer']\n self.net.load_state_dict(torch.load(str(self.policy_path + '_' +\n str(itr) + '.pt')))\n print('loaded: ' + str(self.policy_path + '_' + str(itr) + '.pt'))\n self.epochs += itr\n return itr\n\n def write_out(self, itr):\n train_info = {}\n train_info['iter'] = itr\n train_info['plosses'] = self.plosses\n train_info['vlosses'] = self.vlosses\n train_info['avg_wins'] = self.avg_wins\n train_info['optimizer'] = self.optim\n torch.save(train_info, self.train_info_path)\n torch.save(self.net.state_dict(), str(self.policy_path + '_' + str(\n itr) + '.pt'))\n if itr > 2:\n plt.plot(self.vlosses, label='value loss')\n plt.plot(self.plosses, label='policy loss')\n plt.legend()\n plt.xlabel('epochs')\n plt.ylabel('loss')\n plt.savefig(str(self.graph_path + '_loss.png'))\n plt.clf()\n plt.plot(self.avg_wins, label='avg wins')\n plt.legend()\n plt.xlabel('epochs')\n plt.ylabel('rewards')\n plt.savefig(str(self.graph_path + '_wins.png'))\n plt.clf()\n\n def run(self, cont=False):\n if cont:\n itr = self.read_in()\n print('continuing')\n self.train(itr)\n else:\n self.train()\n\n\ndef main():\n if len(sys.argv) < 2:\n print('Usage: ' + sys.argv[0] + ' config')\n exit(0)\n cont = False\n if len(sys.argv) > 2:\n info = sys.argv[2]\n if info == 'cont':\n cont = True\n config = sys.argv[1]\n trainer = Trainer(config)\n trainer.run(cont=cont)\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\nclass Trainer:\n \"\"\"docstring for Trainer.\"\"\"\n\n def __init__(self, config):\n with open(config, 'r') as f:\n config = json.load(f)\n self.epochs = config['train']['epochs']\n self.policy_epochs = config['train']['policy_epochs']\n self.test_iters = config['test']['iters']\n layers = config['model']['layers']\n conv_size = config['model']['conv_size']\n logheat = config['model']['logheat']\n self.net = ReversiNet(hidden_size=conv_size, layers=layers, logheat\n =logheat)\n env_samples = config['train']['env_samples']\n self.factory = RolloutFactory(self.net, env_samples)\n self.value_loss = nn.MSELoss()\n epsilon = config['train']['epsilon']\n self.ppo_low_bnd = 1 - epsilon\n self.ppo_up_bnd = 1 + epsilon\n lr = config['train']['lr']\n weight_decay = config['train']['weight_decay']\n self.optim = optim.Adam(self.net.parameters(), lr=lr, weight_decay=\n weight_decay)\n self.plosses = []\n self.vlosses = []\n self.avg_wins = []\n self.stand_time = []\n if torch.cuda.is_available():\n torch.cuda.set_device(1)\n self.net.cuda()\n self.device = torch.device('cuda')\n print('Using GPU')\n else:\n self.device = torch.device('cpu')\n print('No GPU detected')\n self.write_interval = config['model']['write_interval']\n self.train_info_path = config['model']['trainer_save_path']\n self.policy_path = config['model']['policy_save_path'].split('.pt')[0]\n self.graph_path = config['model']['graph_save_path'].split('.png')[0]\n\n def train(self, itr=0):\n acc = self.test()\n for i in range(self.epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n rollouts = self.factory.get_rollouts()\n experience_dataset = ExperienceDataset(rollouts)\n data_loader = DataLoader(experience_dataset, batch_size=256,\n shuffle=True, pin_memory=True)\n self.net.train()\n for _ in range(self.policy_epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n for state, aprob, value in data_loader:\n state = _prepare_tensor_batch(state, self.device\n ).unsqueeze(1)\n aprob = _prepare_tensor_batch(aprob, self.device)\n value = _prepare_tensor_batch(value, self.device\n ).unsqueeze(1)\n pdist, pval = self.net(state)\n policy_loss = loss_pi(aprob, pdist)\n val_loss = loss_v(value, pval)\n avg_val_loss += val_loss.item()\n avg_policy_loss += policy_loss.item()\n self.optim.zero_grad()\n loss = policy_loss + val_loss\n loss.backward()\n self.optim.step()\n avg_val_loss /= len(data_loader)\n avg_val_loss /= self.policy_epochs\n avg_policy_loss /= len(data_loader)\n avg_policy_loss /= self.policy_epochs\n self.vlosses.append(avg_val_loss)\n self.plosses.append(avg_policy_loss)\n if (itr + i) % self.write_interval == 0:\n acc = self.test()\n self.avg_wins.append(acc)\n print(\n 'itr: % i, avg wins: % 6.2f, value loss: % 6.2f, policy loss: % 6.2f'\n % (itr + i, acc, avg_val_loss, avg_policy_loss))\n self.write_out(itr + i)\n\n def test(self):\n self.net.eval()\n env = ReversiEnv()\n rounds = env.length() // 2\n tot_rew = 0\n tot_wins = 0\n runs = self.test_iters\n for _ in range(runs):\n state, turn = env.reset()\n actions = env.action_space()\n done = False\n for i in range(rounds):\n in_state = torch.FloatTensor(state).unsqueeze(0).unsqueeze(0\n ).to(self.device)\n probs, _ = self.net(in_state)\n probs = probs.squeeze().cpu().detach().numpy()\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n if done:\n break\n probs = np.ones(actions.shape[0])\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n 
if done:\n break\n tot_rew += reward\n if reward > 0:\n tot_wins += 1\n tot_rew /= runs\n return tot_wins / runs\n\n def read_in(self, itr=None):\n train_info = {}\n train_info = torch.load(self.train_info_path)\n if itr is None:\n itr = train_info['iter']\n self.plosses = train_info['plosses']\n self.vlosses = train_info['vlosses']\n self.avg_wins = train_info['avg_wins']\n self.optim = train_info['optimizer']\n self.net.load_state_dict(torch.load(str(self.policy_path + '_' +\n str(itr) + '.pt')))\n print('loaded: ' + str(self.policy_path + '_' + str(itr) + '.pt'))\n self.epochs += itr\n return itr\n\n def write_out(self, itr):\n train_info = {}\n train_info['iter'] = itr\n train_info['plosses'] = self.plosses\n train_info['vlosses'] = self.vlosses\n train_info['avg_wins'] = self.avg_wins\n train_info['optimizer'] = self.optim\n torch.save(train_info, self.train_info_path)\n torch.save(self.net.state_dict(), str(self.policy_path + '_' + str(\n itr) + '.pt'))\n if itr > 2:\n plt.plot(self.vlosses, label='value loss')\n plt.plot(self.plosses, label='policy loss')\n plt.legend()\n plt.xlabel('epochs')\n plt.ylabel('loss')\n plt.savefig(str(self.graph_path + '_loss.png'))\n plt.clf()\n plt.plot(self.avg_wins, label='avg wins')\n plt.legend()\n plt.xlabel('epochs')\n plt.ylabel('rewards')\n plt.savefig(str(self.graph_path + '_wins.png'))\n plt.clf()\n\n def run(self, cont=False):\n if cont:\n itr = self.read_in()\n print('continuing')\n self.train(itr)\n else:\n self.train()\n\n\ndef main():\n if len(sys.argv) < 2:\n print('Usage: ' + sys.argv[0] + ' config')\n exit(0)\n cont = False\n if len(sys.argv) > 2:\n info = sys.argv[2]\n if info == 'cont':\n cont = True\n config = sys.argv[1]\n trainer = Trainer(config)\n trainer.run(cont=cont)\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\nclass Trainer:\n \"\"\"docstring for Trainer.\"\"\"\n\n def __init__(self, config):\n with open(config, 'r') as f:\n config = json.load(f)\n self.epochs = config['train']['epochs']\n self.policy_epochs = config['train']['policy_epochs']\n self.test_iters = config['test']['iters']\n layers = config['model']['layers']\n conv_size = config['model']['conv_size']\n logheat = config['model']['logheat']\n self.net = ReversiNet(hidden_size=conv_size, layers=layers, logheat\n =logheat)\n env_samples = config['train']['env_samples']\n self.factory = RolloutFactory(self.net, env_samples)\n self.value_loss = nn.MSELoss()\n epsilon = config['train']['epsilon']\n self.ppo_low_bnd = 1 - epsilon\n self.ppo_up_bnd = 1 + epsilon\n lr = config['train']['lr']\n weight_decay = config['train']['weight_decay']\n self.optim = optim.Adam(self.net.parameters(), lr=lr, weight_decay=\n weight_decay)\n self.plosses = []\n self.vlosses = []\n self.avg_wins = []\n self.stand_time = []\n if torch.cuda.is_available():\n torch.cuda.set_device(1)\n self.net.cuda()\n self.device = torch.device('cuda')\n print('Using GPU')\n else:\n self.device = torch.device('cpu')\n print('No GPU detected')\n self.write_interval = config['model']['write_interval']\n self.train_info_path = config['model']['trainer_save_path']\n self.policy_path = config['model']['policy_save_path'].split('.pt')[0]\n self.graph_path = config['model']['graph_save_path'].split('.png')[0]\n\n def train(self, itr=0):\n acc = self.test()\n for i in range(self.epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n rollouts = self.factory.get_rollouts()\n experience_dataset = ExperienceDataset(rollouts)\n data_loader = DataLoader(experience_dataset, batch_size=256,\n shuffle=True, pin_memory=True)\n self.net.train()\n for _ in range(self.policy_epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n for state, aprob, value in data_loader:\n state = _prepare_tensor_batch(state, self.device\n ).unsqueeze(1)\n aprob = _prepare_tensor_batch(aprob, self.device)\n value = _prepare_tensor_batch(value, self.device\n ).unsqueeze(1)\n pdist, pval = self.net(state)\n policy_loss = loss_pi(aprob, pdist)\n val_loss = loss_v(value, pval)\n avg_val_loss += val_loss.item()\n avg_policy_loss += policy_loss.item()\n self.optim.zero_grad()\n loss = policy_loss + val_loss\n loss.backward()\n self.optim.step()\n avg_val_loss /= len(data_loader)\n avg_val_loss /= self.policy_epochs\n avg_policy_loss /= len(data_loader)\n avg_policy_loss /= self.policy_epochs\n self.vlosses.append(avg_val_loss)\n self.plosses.append(avg_policy_loss)\n if (itr + i) % self.write_interval == 0:\n acc = self.test()\n self.avg_wins.append(acc)\n print(\n 'itr: % i, avg wins: % 6.2f, value loss: % 6.2f, policy loss: % 6.2f'\n % (itr + i, acc, avg_val_loss, avg_policy_loss))\n self.write_out(itr + i)\n\n def test(self):\n self.net.eval()\n env = ReversiEnv()\n rounds = env.length() // 2\n tot_rew = 0\n tot_wins = 0\n runs = self.test_iters\n for _ in range(runs):\n state, turn = env.reset()\n actions = env.action_space()\n done = False\n for i in range(rounds):\n in_state = torch.FloatTensor(state).unsqueeze(0).unsqueeze(0\n ).to(self.device)\n probs, _ = self.net(in_state)\n probs = probs.squeeze().cpu().detach().numpy()\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n if done:\n break\n probs = np.ones(actions.shape[0])\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n 
if done:\n break\n tot_rew += reward\n if reward > 0:\n tot_wins += 1\n tot_rew /= runs\n return tot_wins / runs\n\n def read_in(self, itr=None):\n train_info = {}\n train_info = torch.load(self.train_info_path)\n if itr is None:\n itr = train_info['iter']\n self.plosses = train_info['plosses']\n self.vlosses = train_info['vlosses']\n self.avg_wins = train_info['avg_wins']\n self.optim = train_info['optimizer']\n self.net.load_state_dict(torch.load(str(self.policy_path + '_' +\n str(itr) + '.pt')))\n print('loaded: ' + str(self.policy_path + '_' + str(itr) + '.pt'))\n self.epochs += itr\n return itr\n\n def write_out(self, itr):\n train_info = {}\n train_info['iter'] = itr\n train_info['plosses'] = self.plosses\n train_info['vlosses'] = self.vlosses\n train_info['avg_wins'] = self.avg_wins\n train_info['optimizer'] = self.optim\n torch.save(train_info, self.train_info_path)\n torch.save(self.net.state_dict(), str(self.policy_path + '_' + str(\n itr) + '.pt'))\n if itr > 2:\n plt.plot(self.vlosses, label='value loss')\n plt.plot(self.plosses, label='policy loss')\n plt.legend()\n plt.xlabel('epochs')\n plt.ylabel('loss')\n plt.savefig(str(self.graph_path + '_loss.png'))\n plt.clf()\n plt.plot(self.avg_wins, label='avg wins')\n plt.legend()\n plt.xlabel('epochs')\n plt.ylabel('rewards')\n plt.savefig(str(self.graph_path + '_wins.png'))\n plt.clf()\n\n def run(self, cont=False):\n if cont:\n itr = self.read_in()\n print('continuing')\n self.train(itr)\n else:\n self.train()\n\n\ndef main():\n if len(sys.argv) < 2:\n print('Usage: ' + sys.argv[0] + ' config')\n exit(0)\n cont = False\n if len(sys.argv) > 2:\n info = sys.argv[2]\n if info == 'cont':\n cont = True\n config = sys.argv[1]\n trainer = Trainer(config)\n trainer.run(cont=cont)\n\n\n<code token>\n",
"<import token>\n\n\nclass Trainer:\n \"\"\"docstring for Trainer.\"\"\"\n\n def __init__(self, config):\n with open(config, 'r') as f:\n config = json.load(f)\n self.epochs = config['train']['epochs']\n self.policy_epochs = config['train']['policy_epochs']\n self.test_iters = config['test']['iters']\n layers = config['model']['layers']\n conv_size = config['model']['conv_size']\n logheat = config['model']['logheat']\n self.net = ReversiNet(hidden_size=conv_size, layers=layers, logheat\n =logheat)\n env_samples = config['train']['env_samples']\n self.factory = RolloutFactory(self.net, env_samples)\n self.value_loss = nn.MSELoss()\n epsilon = config['train']['epsilon']\n self.ppo_low_bnd = 1 - epsilon\n self.ppo_up_bnd = 1 + epsilon\n lr = config['train']['lr']\n weight_decay = config['train']['weight_decay']\n self.optim = optim.Adam(self.net.parameters(), lr=lr, weight_decay=\n weight_decay)\n self.plosses = []\n self.vlosses = []\n self.avg_wins = []\n self.stand_time = []\n if torch.cuda.is_available():\n torch.cuda.set_device(1)\n self.net.cuda()\n self.device = torch.device('cuda')\n print('Using GPU')\n else:\n self.device = torch.device('cpu')\n print('No GPU detected')\n self.write_interval = config['model']['write_interval']\n self.train_info_path = config['model']['trainer_save_path']\n self.policy_path = config['model']['policy_save_path'].split('.pt')[0]\n self.graph_path = config['model']['graph_save_path'].split('.png')[0]\n\n def train(self, itr=0):\n acc = self.test()\n for i in range(self.epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n rollouts = self.factory.get_rollouts()\n experience_dataset = ExperienceDataset(rollouts)\n data_loader = DataLoader(experience_dataset, batch_size=256,\n shuffle=True, pin_memory=True)\n self.net.train()\n for _ in range(self.policy_epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n for state, aprob, value in data_loader:\n state = _prepare_tensor_batch(state, self.device\n ).unsqueeze(1)\n aprob = _prepare_tensor_batch(aprob, self.device)\n value = _prepare_tensor_batch(value, self.device\n ).unsqueeze(1)\n pdist, pval = self.net(state)\n policy_loss = loss_pi(aprob, pdist)\n val_loss = loss_v(value, pval)\n avg_val_loss += val_loss.item()\n avg_policy_loss += policy_loss.item()\n self.optim.zero_grad()\n loss = policy_loss + val_loss\n loss.backward()\n self.optim.step()\n avg_val_loss /= len(data_loader)\n avg_val_loss /= self.policy_epochs\n avg_policy_loss /= len(data_loader)\n avg_policy_loss /= self.policy_epochs\n self.vlosses.append(avg_val_loss)\n self.plosses.append(avg_policy_loss)\n if (itr + i) % self.write_interval == 0:\n acc = self.test()\n self.avg_wins.append(acc)\n print(\n 'itr: % i, avg wins: % 6.2f, value loss: % 6.2f, policy loss: % 6.2f'\n % (itr + i, acc, avg_val_loss, avg_policy_loss))\n self.write_out(itr + i)\n\n def test(self):\n self.net.eval()\n env = ReversiEnv()\n rounds = env.length() // 2\n tot_rew = 0\n tot_wins = 0\n runs = self.test_iters\n for _ in range(runs):\n state, turn = env.reset()\n actions = env.action_space()\n done = False\n for i in range(rounds):\n in_state = torch.FloatTensor(state).unsqueeze(0).unsqueeze(0\n ).to(self.device)\n probs, _ = self.net(in_state)\n probs = probs.squeeze().cpu().detach().numpy()\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n if done:\n break\n probs = np.ones(actions.shape[0])\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n 
if done:\n break\n tot_rew += reward\n if reward > 0:\n tot_wins += 1\n tot_rew /= runs\n return tot_wins / runs\n\n def read_in(self, itr=None):\n train_info = {}\n train_info = torch.load(self.train_info_path)\n if itr is None:\n itr = train_info['iter']\n self.plosses = train_info['plosses']\n self.vlosses = train_info['vlosses']\n self.avg_wins = train_info['avg_wins']\n self.optim = train_info['optimizer']\n self.net.load_state_dict(torch.load(str(self.policy_path + '_' +\n str(itr) + '.pt')))\n print('loaded: ' + str(self.policy_path + '_' + str(itr) + '.pt'))\n self.epochs += itr\n return itr\n\n def write_out(self, itr):\n train_info = {}\n train_info['iter'] = itr\n train_info['plosses'] = self.plosses\n train_info['vlosses'] = self.vlosses\n train_info['avg_wins'] = self.avg_wins\n train_info['optimizer'] = self.optim\n torch.save(train_info, self.train_info_path)\n torch.save(self.net.state_dict(), str(self.policy_path + '_' + str(\n itr) + '.pt'))\n if itr > 2:\n plt.plot(self.vlosses, label='value loss')\n plt.plot(self.plosses, label='policy loss')\n plt.legend()\n plt.xlabel('epochs')\n plt.ylabel('loss')\n plt.savefig(str(self.graph_path + '_loss.png'))\n plt.clf()\n plt.plot(self.avg_wins, label='avg wins')\n plt.legend()\n plt.xlabel('epochs')\n plt.ylabel('rewards')\n plt.savefig(str(self.graph_path + '_wins.png'))\n plt.clf()\n\n def run(self, cont=False):\n if cont:\n itr = self.read_in()\n print('continuing')\n self.train(itr)\n else:\n self.train()\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Trainer:\n <docstring token>\n\n def __init__(self, config):\n with open(config, 'r') as f:\n config = json.load(f)\n self.epochs = config['train']['epochs']\n self.policy_epochs = config['train']['policy_epochs']\n self.test_iters = config['test']['iters']\n layers = config['model']['layers']\n conv_size = config['model']['conv_size']\n logheat = config['model']['logheat']\n self.net = ReversiNet(hidden_size=conv_size, layers=layers, logheat\n =logheat)\n env_samples = config['train']['env_samples']\n self.factory = RolloutFactory(self.net, env_samples)\n self.value_loss = nn.MSELoss()\n epsilon = config['train']['epsilon']\n self.ppo_low_bnd = 1 - epsilon\n self.ppo_up_bnd = 1 + epsilon\n lr = config['train']['lr']\n weight_decay = config['train']['weight_decay']\n self.optim = optim.Adam(self.net.parameters(), lr=lr, weight_decay=\n weight_decay)\n self.plosses = []\n self.vlosses = []\n self.avg_wins = []\n self.stand_time = []\n if torch.cuda.is_available():\n torch.cuda.set_device(1)\n self.net.cuda()\n self.device = torch.device('cuda')\n print('Using GPU')\n else:\n self.device = torch.device('cpu')\n print('No GPU detected')\n self.write_interval = config['model']['write_interval']\n self.train_info_path = config['model']['trainer_save_path']\n self.policy_path = config['model']['policy_save_path'].split('.pt')[0]\n self.graph_path = config['model']['graph_save_path'].split('.png')[0]\n\n def train(self, itr=0):\n acc = self.test()\n for i in range(self.epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n rollouts = self.factory.get_rollouts()\n experience_dataset = ExperienceDataset(rollouts)\n data_loader = DataLoader(experience_dataset, batch_size=256,\n shuffle=True, pin_memory=True)\n self.net.train()\n for _ in range(self.policy_epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n for state, aprob, value in data_loader:\n state = _prepare_tensor_batch(state, self.device\n ).unsqueeze(1)\n aprob = _prepare_tensor_batch(aprob, self.device)\n value = _prepare_tensor_batch(value, self.device\n ).unsqueeze(1)\n pdist, pval = self.net(state)\n policy_loss = loss_pi(aprob, pdist)\n val_loss = loss_v(value, pval)\n avg_val_loss += val_loss.item()\n avg_policy_loss += policy_loss.item()\n self.optim.zero_grad()\n loss = policy_loss + val_loss\n loss.backward()\n self.optim.step()\n avg_val_loss /= len(data_loader)\n avg_val_loss /= self.policy_epochs\n avg_policy_loss /= len(data_loader)\n avg_policy_loss /= self.policy_epochs\n self.vlosses.append(avg_val_loss)\n self.plosses.append(avg_policy_loss)\n if (itr + i) % self.write_interval == 0:\n acc = self.test()\n self.avg_wins.append(acc)\n print(\n 'itr: % i, avg wins: % 6.2f, value loss: % 6.2f, policy loss: % 6.2f'\n % (itr + i, acc, avg_val_loss, avg_policy_loss))\n self.write_out(itr + i)\n\n def test(self):\n self.net.eval()\n env = ReversiEnv()\n rounds = env.length() // 2\n tot_rew = 0\n tot_wins = 0\n runs = self.test_iters\n for _ in range(runs):\n state, turn = env.reset()\n actions = env.action_space()\n done = False\n for i in range(rounds):\n in_state = torch.FloatTensor(state).unsqueeze(0).unsqueeze(0\n ).to(self.device)\n probs, _ = self.net(in_state)\n probs = probs.squeeze().cpu().detach().numpy()\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n if done:\n break\n probs = np.ones(actions.shape[0])\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n if done:\n 
break\n tot_rew += reward\n if reward > 0:\n tot_wins += 1\n tot_rew /= runs\n return tot_wins / runs\n\n def read_in(self, itr=None):\n train_info = {}\n train_info = torch.load(self.train_info_path)\n if itr is None:\n itr = train_info['iter']\n self.plosses = train_info['plosses']\n self.vlosses = train_info['vlosses']\n self.avg_wins = train_info['avg_wins']\n self.optim = train_info['optimizer']\n self.net.load_state_dict(torch.load(str(self.policy_path + '_' +\n str(itr) + '.pt')))\n print('loaded: ' + str(self.policy_path + '_' + str(itr) + '.pt'))\n self.epochs += itr\n return itr\n\n def write_out(self, itr):\n train_info = {}\n train_info['iter'] = itr\n train_info['plosses'] = self.plosses\n train_info['vlosses'] = self.vlosses\n train_info['avg_wins'] = self.avg_wins\n train_info['optimizer'] = self.optim\n torch.save(train_info, self.train_info_path)\n torch.save(self.net.state_dict(), str(self.policy_path + '_' + str(\n itr) + '.pt'))\n if itr > 2:\n plt.plot(self.vlosses, label='value loss')\n plt.plot(self.plosses, label='policy loss')\n plt.legend()\n plt.xlabel('epochs')\n plt.ylabel('loss')\n plt.savefig(str(self.graph_path + '_loss.png'))\n plt.clf()\n plt.plot(self.avg_wins, label='avg wins')\n plt.legend()\n plt.xlabel('epochs')\n plt.ylabel('rewards')\n plt.savefig(str(self.graph_path + '_wins.png'))\n plt.clf()\n\n def run(self, cont=False):\n if cont:\n itr = self.read_in()\n print('continuing')\n self.train(itr)\n else:\n self.train()\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Trainer:\n <docstring token>\n\n def __init__(self, config):\n with open(config, 'r') as f:\n config = json.load(f)\n self.epochs = config['train']['epochs']\n self.policy_epochs = config['train']['policy_epochs']\n self.test_iters = config['test']['iters']\n layers = config['model']['layers']\n conv_size = config['model']['conv_size']\n logheat = config['model']['logheat']\n self.net = ReversiNet(hidden_size=conv_size, layers=layers, logheat\n =logheat)\n env_samples = config['train']['env_samples']\n self.factory = RolloutFactory(self.net, env_samples)\n self.value_loss = nn.MSELoss()\n epsilon = config['train']['epsilon']\n self.ppo_low_bnd = 1 - epsilon\n self.ppo_up_bnd = 1 + epsilon\n lr = config['train']['lr']\n weight_decay = config['train']['weight_decay']\n self.optim = optim.Adam(self.net.parameters(), lr=lr, weight_decay=\n weight_decay)\n self.plosses = []\n self.vlosses = []\n self.avg_wins = []\n self.stand_time = []\n if torch.cuda.is_available():\n torch.cuda.set_device(1)\n self.net.cuda()\n self.device = torch.device('cuda')\n print('Using GPU')\n else:\n self.device = torch.device('cpu')\n print('No GPU detected')\n self.write_interval = config['model']['write_interval']\n self.train_info_path = config['model']['trainer_save_path']\n self.policy_path = config['model']['policy_save_path'].split('.pt')[0]\n self.graph_path = config['model']['graph_save_path'].split('.png')[0]\n\n def train(self, itr=0):\n acc = self.test()\n for i in range(self.epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n rollouts = self.factory.get_rollouts()\n experience_dataset = ExperienceDataset(rollouts)\n data_loader = DataLoader(experience_dataset, batch_size=256,\n shuffle=True, pin_memory=True)\n self.net.train()\n for _ in range(self.policy_epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n for state, aprob, value in data_loader:\n state = _prepare_tensor_batch(state, self.device\n ).unsqueeze(1)\n aprob = _prepare_tensor_batch(aprob, self.device)\n value = _prepare_tensor_batch(value, self.device\n ).unsqueeze(1)\n pdist, pval = self.net(state)\n policy_loss = loss_pi(aprob, pdist)\n val_loss = loss_v(value, pval)\n avg_val_loss += val_loss.item()\n avg_policy_loss += policy_loss.item()\n self.optim.zero_grad()\n loss = policy_loss + val_loss\n loss.backward()\n self.optim.step()\n avg_val_loss /= len(data_loader)\n avg_val_loss /= self.policy_epochs\n avg_policy_loss /= len(data_loader)\n avg_policy_loss /= self.policy_epochs\n self.vlosses.append(avg_val_loss)\n self.plosses.append(avg_policy_loss)\n if (itr + i) % self.write_interval == 0:\n acc = self.test()\n self.avg_wins.append(acc)\n print(\n 'itr: % i, avg wins: % 6.2f, value loss: % 6.2f, policy loss: % 6.2f'\n % (itr + i, acc, avg_val_loss, avg_policy_loss))\n self.write_out(itr + i)\n\n def test(self):\n self.net.eval()\n env = ReversiEnv()\n rounds = env.length() // 2\n tot_rew = 0\n tot_wins = 0\n runs = self.test_iters\n for _ in range(runs):\n state, turn = env.reset()\n actions = env.action_space()\n done = False\n for i in range(rounds):\n in_state = torch.FloatTensor(state).unsqueeze(0).unsqueeze(0\n ).to(self.device)\n probs, _ = self.net(in_state)\n probs = probs.squeeze().cpu().detach().numpy()\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n if done:\n break\n probs = np.ones(actions.shape[0])\n action = sample(probs, actions)\n state, turn, reward, done = env.step(action)\n actions = env.action_space()\n if done:\n 
break\n tot_rew += reward\n if reward > 0:\n tot_wins += 1\n tot_rew /= runs\n return tot_wins / runs\n\n def read_in(self, itr=None):\n train_info = {}\n train_info = torch.load(self.train_info_path)\n if itr is None:\n itr = train_info['iter']\n self.plosses = train_info['plosses']\n self.vlosses = train_info['vlosses']\n self.avg_wins = train_info['avg_wins']\n self.optim = train_info['optimizer']\n self.net.load_state_dict(torch.load(str(self.policy_path + '_' +\n str(itr) + '.pt')))\n print('loaded: ' + str(self.policy_path + '_' + str(itr) + '.pt'))\n self.epochs += itr\n return itr\n <function token>\n\n def run(self, cont=False):\n if cont:\n itr = self.read_in()\n print('continuing')\n self.train(itr)\n else:\n self.train()\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Trainer:\n <docstring token>\n\n def __init__(self, config):\n with open(config, 'r') as f:\n config = json.load(f)\n self.epochs = config['train']['epochs']\n self.policy_epochs = config['train']['policy_epochs']\n self.test_iters = config['test']['iters']\n layers = config['model']['layers']\n conv_size = config['model']['conv_size']\n logheat = config['model']['logheat']\n self.net = ReversiNet(hidden_size=conv_size, layers=layers, logheat\n =logheat)\n env_samples = config['train']['env_samples']\n self.factory = RolloutFactory(self.net, env_samples)\n self.value_loss = nn.MSELoss()\n epsilon = config['train']['epsilon']\n self.ppo_low_bnd = 1 - epsilon\n self.ppo_up_bnd = 1 + epsilon\n lr = config['train']['lr']\n weight_decay = config['train']['weight_decay']\n self.optim = optim.Adam(self.net.parameters(), lr=lr, weight_decay=\n weight_decay)\n self.plosses = []\n self.vlosses = []\n self.avg_wins = []\n self.stand_time = []\n if torch.cuda.is_available():\n torch.cuda.set_device(1)\n self.net.cuda()\n self.device = torch.device('cuda')\n print('Using GPU')\n else:\n self.device = torch.device('cpu')\n print('No GPU detected')\n self.write_interval = config['model']['write_interval']\n self.train_info_path = config['model']['trainer_save_path']\n self.policy_path = config['model']['policy_save_path'].split('.pt')[0]\n self.graph_path = config['model']['graph_save_path'].split('.png')[0]\n\n def train(self, itr=0):\n acc = self.test()\n for i in range(self.epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n rollouts = self.factory.get_rollouts()\n experience_dataset = ExperienceDataset(rollouts)\n data_loader = DataLoader(experience_dataset, batch_size=256,\n shuffle=True, pin_memory=True)\n self.net.train()\n for _ in range(self.policy_epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n for state, aprob, value in data_loader:\n state = _prepare_tensor_batch(state, self.device\n ).unsqueeze(1)\n aprob = _prepare_tensor_batch(aprob, self.device)\n value = _prepare_tensor_batch(value, self.device\n ).unsqueeze(1)\n pdist, pval = self.net(state)\n policy_loss = loss_pi(aprob, pdist)\n val_loss = loss_v(value, pval)\n avg_val_loss += val_loss.item()\n avg_policy_loss += policy_loss.item()\n self.optim.zero_grad()\n loss = policy_loss + val_loss\n loss.backward()\n self.optim.step()\n avg_val_loss /= len(data_loader)\n avg_val_loss /= self.policy_epochs\n avg_policy_loss /= len(data_loader)\n avg_policy_loss /= self.policy_epochs\n self.vlosses.append(avg_val_loss)\n self.plosses.append(avg_policy_loss)\n if (itr + i) % self.write_interval == 0:\n acc = self.test()\n self.avg_wins.append(acc)\n print(\n 'itr: % i, avg wins: % 6.2f, value loss: % 6.2f, policy loss: % 6.2f'\n % (itr + i, acc, avg_val_loss, avg_policy_loss))\n self.write_out(itr + i)\n <function token>\n\n def read_in(self, itr=None):\n train_info = {}\n train_info = torch.load(self.train_info_path)\n if itr is None:\n itr = train_info['iter']\n self.plosses = train_info['plosses']\n self.vlosses = train_info['vlosses']\n self.avg_wins = train_info['avg_wins']\n self.optim = train_info['optimizer']\n self.net.load_state_dict(torch.load(str(self.policy_path + '_' +\n str(itr) + '.pt')))\n print('loaded: ' + str(self.policy_path + '_' + str(itr) + '.pt'))\n self.epochs += itr\n return itr\n <function token>\n\n def run(self, cont=False):\n if cont:\n itr = self.read_in()\n print('continuing')\n self.train(itr)\n else:\n self.train()\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Trainer:\n <docstring token>\n\n def __init__(self, config):\n with open(config, 'r') as f:\n config = json.load(f)\n self.epochs = config['train']['epochs']\n self.policy_epochs = config['train']['policy_epochs']\n self.test_iters = config['test']['iters']\n layers = config['model']['layers']\n conv_size = config['model']['conv_size']\n logheat = config['model']['logheat']\n self.net = ReversiNet(hidden_size=conv_size, layers=layers, logheat\n =logheat)\n env_samples = config['train']['env_samples']\n self.factory = RolloutFactory(self.net, env_samples)\n self.value_loss = nn.MSELoss()\n epsilon = config['train']['epsilon']\n self.ppo_low_bnd = 1 - epsilon\n self.ppo_up_bnd = 1 + epsilon\n lr = config['train']['lr']\n weight_decay = config['train']['weight_decay']\n self.optim = optim.Adam(self.net.parameters(), lr=lr, weight_decay=\n weight_decay)\n self.plosses = []\n self.vlosses = []\n self.avg_wins = []\n self.stand_time = []\n if torch.cuda.is_available():\n torch.cuda.set_device(1)\n self.net.cuda()\n self.device = torch.device('cuda')\n print('Using GPU')\n else:\n self.device = torch.device('cpu')\n print('No GPU detected')\n self.write_interval = config['model']['write_interval']\n self.train_info_path = config['model']['trainer_save_path']\n self.policy_path = config['model']['policy_save_path'].split('.pt')[0]\n self.graph_path = config['model']['graph_save_path'].split('.png')[0]\n\n def train(self, itr=0):\n acc = self.test()\n for i in range(self.epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n rollouts = self.factory.get_rollouts()\n experience_dataset = ExperienceDataset(rollouts)\n data_loader = DataLoader(experience_dataset, batch_size=256,\n shuffle=True, pin_memory=True)\n self.net.train()\n for _ in range(self.policy_epochs):\n avg_policy_loss = 0\n avg_val_loss = 0\n for state, aprob, value in data_loader:\n state = _prepare_tensor_batch(state, self.device\n ).unsqueeze(1)\n aprob = _prepare_tensor_batch(aprob, self.device)\n value = _prepare_tensor_batch(value, self.device\n ).unsqueeze(1)\n pdist, pval = self.net(state)\n policy_loss = loss_pi(aprob, pdist)\n val_loss = loss_v(value, pval)\n avg_val_loss += val_loss.item()\n avg_policy_loss += policy_loss.item()\n self.optim.zero_grad()\n loss = policy_loss + val_loss\n loss.backward()\n self.optim.step()\n avg_val_loss /= len(data_loader)\n avg_val_loss /= self.policy_epochs\n avg_policy_loss /= len(data_loader)\n avg_policy_loss /= self.policy_epochs\n self.vlosses.append(avg_val_loss)\n self.plosses.append(avg_policy_loss)\n if (itr + i) % self.write_interval == 0:\n acc = self.test()\n self.avg_wins.append(acc)\n print(\n 'itr: % i, avg wins: % 6.2f, value loss: % 6.2f, policy loss: % 6.2f'\n % (itr + i, acc, avg_val_loss, avg_policy_loss))\n self.write_out(itr + i)\n <function token>\n\n def read_in(self, itr=None):\n train_info = {}\n train_info = torch.load(self.train_info_path)\n if itr is None:\n itr = train_info['iter']\n self.plosses = train_info['plosses']\n self.vlosses = train_info['vlosses']\n self.avg_wins = train_info['avg_wins']\n self.optim = train_info['optimizer']\n self.net.load_state_dict(torch.load(str(self.policy_path + '_' +\n str(itr) + '.pt')))\n print('loaded: ' + str(self.policy_path + '_' + str(itr) + '.pt'))\n self.epochs += itr\n return itr\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Trainer:\n <docstring token>\n\n def __init__(self, config):\n with open(config, 'r') as f:\n config = json.load(f)\n self.epochs = config['train']['epochs']\n self.policy_epochs = config['train']['policy_epochs']\n self.test_iters = config['test']['iters']\n layers = config['model']['layers']\n conv_size = config['model']['conv_size']\n logheat = config['model']['logheat']\n self.net = ReversiNet(hidden_size=conv_size, layers=layers, logheat\n =logheat)\n env_samples = config['train']['env_samples']\n self.factory = RolloutFactory(self.net, env_samples)\n self.value_loss = nn.MSELoss()\n epsilon = config['train']['epsilon']\n self.ppo_low_bnd = 1 - epsilon\n self.ppo_up_bnd = 1 + epsilon\n lr = config['train']['lr']\n weight_decay = config['train']['weight_decay']\n self.optim = optim.Adam(self.net.parameters(), lr=lr, weight_decay=\n weight_decay)\n self.plosses = []\n self.vlosses = []\n self.avg_wins = []\n self.stand_time = []\n if torch.cuda.is_available():\n torch.cuda.set_device(1)\n self.net.cuda()\n self.device = torch.device('cuda')\n print('Using GPU')\n else:\n self.device = torch.device('cpu')\n print('No GPU detected')\n self.write_interval = config['model']['write_interval']\n self.train_info_path = config['model']['trainer_save_path']\n self.policy_path = config['model']['policy_save_path'].split('.pt')[0]\n self.graph_path = config['model']['graph_save_path'].split('.png')[0]\n <function token>\n <function token>\n\n def read_in(self, itr=None):\n train_info = {}\n train_info = torch.load(self.train_info_path)\n if itr is None:\n itr = train_info['iter']\n self.plosses = train_info['plosses']\n self.vlosses = train_info['vlosses']\n self.avg_wins = train_info['avg_wins']\n self.optim = train_info['optimizer']\n self.net.load_state_dict(torch.load(str(self.policy_path + '_' +\n str(itr) + '.pt')))\n print('loaded: ' + str(self.policy_path + '_' + str(itr) + '.pt'))\n self.epochs += itr\n return itr\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Trainer:\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def read_in(self, itr=None):\n train_info = {}\n train_info = torch.load(self.train_info_path)\n if itr is None:\n itr = train_info['iter']\n self.plosses = train_info['plosses']\n self.vlosses = train_info['vlosses']\n self.avg_wins = train_info['avg_wins']\n self.optim = train_info['optimizer']\n self.net.load_state_dict(torch.load(str(self.policy_path + '_' +\n str(itr) + '.pt')))\n print('loaded: ' + str(self.policy_path + '_' + str(itr) + '.pt'))\n self.epochs += itr\n return itr\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Trainer:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<class token>\n<function token>\n<code token>\n"
] | false |
99,568 |
f4ac3a6ee9239f09f31e4327ba39c7ae75eeedbc
|
import pytest
import torch
from torchstruct import TensorStruct
def test_struct_should_raise_if_constructed_from_invalid_data():
with pytest.raises(AssertionError):
_ = TensorStruct({'a': (1, 2)})
def test_struct_should_allow_to_create_single_zeros_tensor():
t = TensorStruct.zeros((2, 3), (4, 5), dtype=torch.float64, device='cpu')
assert t.shape == (4, 5, 2, 3)
assert t.dtype == torch.float64
assert t.device.type == 'cpu'
def test_struct_should_allow_to_create_nested_zeros_tensors():
t = TensorStruct.zeros({
'a': 5,
'b': (10,),
'c': (3, 14),
'd': {
'e': 2,
'f': (3, 1, 4),
'g': {
'h': {
'i': (8, 2)
}
}
}
}, prefix_shape=(1,))
td = t.data()
assert td['a'].shape == (1, 5)
assert td['b'].shape == (1, 10)
assert td['c'].shape == (1, 3, 14)
assert td['d']['e'].shape == (1, 2)
assert td['d']['f'].shape == (1, 3, 1, 4)
assert td['d']['g']['h']['i'].shape == (1, 8, 2)
def test_struct_tensors_should_return_list_of_tensors_in_struct():
t = TensorStruct({
'a': torch.ones(5),
'b': {
'c': {
'd': torch.ones(5) * 2
}
}
})
ts = t.tensors()
assert len(ts) == 2
assert any([torch.all(torch.ones(5).eq(t_)) for t_ in ts])
    assert any([torch.all((torch.ones(5) * 2).eq(t_)) for t_ in ts])  # the '* 2' belongs inside the comparison, not on the bool
def test_struct_common_size_should_return_size_of_first_tensor_in_dict():
t = TensorStruct({
'a': torch.ones((10, 2)),
'b': {
'c': torch.ones((5, 2))
}
})
assert t.common_size(0) in [10, 5]
|
[
"import pytest\r\nimport torch\r\n\r\nfrom torchstruct import TensorStruct\r\n\r\n\r\ndef test_struct_should_raise_if_constructed_from_invalid_data():\r\n with pytest.raises(AssertionError):\r\n _ = TensorStruct({'a': (1, 2)})\r\n\r\n\r\ndef test_struct_should_allow_to_create_single_zeros_tensor():\r\n t = TensorStruct.zeros((2, 3), (4, 5), dtype=torch.float64, device='cpu')\r\n assert t.shape == (4, 5, 2, 3)\r\n assert t.dtype == torch.float64\r\n assert t.device.type == 'cpu'\r\n\r\n\r\ndef test_struct_should_allow_to_create_nested_zeros_tensors():\r\n t = TensorStruct.zeros({\r\n 'a': 5,\r\n 'b': (10,),\r\n 'c': (3, 14),\r\n 'd': {\r\n 'e': 2,\r\n 'f': (3, 1, 4),\r\n 'g': {\r\n 'h': {\r\n 'i': (8, 2)\r\n }\r\n }\r\n }\r\n }, prefix_shape=(1,))\r\n td = t.data()\r\n assert td['a'].shape == (1, 5)\r\n assert td['b'].shape == (1, 10)\r\n assert td['c'].shape == (1, 3, 14)\r\n assert td['d']['e'].shape == (1, 2)\r\n assert td['d']['f'].shape == (1, 3, 1, 4)\r\n assert td['d']['g']['h']['i'].shape == (1, 8, 2)\r\n\r\n\r\ndef test_struct_tensors_should_return_list_of_tensors_in_struct():\r\n t = TensorStruct({\r\n 'a': torch.ones(5),\r\n 'b': {\r\n 'c': {\r\n 'd': torch.ones(5) * 2\r\n }\r\n }\r\n })\r\n ts = t.tensors()\r\n assert len(ts) == 2\r\n assert any([torch.all(torch.ones(5).eq(t_)) for t_ in ts])\r\n assert any([torch.all(torch.ones(5).eq(t_)) * 2 for t_ in ts])\r\n\r\n\r\ndef test_struct_common_size_should_return_size_of_first_tensor_in_dict():\r\n t = TensorStruct({\r\n 'a': torch.ones((10, 2)),\r\n 'b': {\r\n 'c': torch.ones((5, 2))\r\n }\r\n })\r\n assert t.common_size(0) in [10, 5]\r\n",
"import pytest\nimport torch\nfrom torchstruct import TensorStruct\n\n\ndef test_struct_should_raise_if_constructed_from_invalid_data():\n with pytest.raises(AssertionError):\n _ = TensorStruct({'a': (1, 2)})\n\n\ndef test_struct_should_allow_to_create_single_zeros_tensor():\n t = TensorStruct.zeros((2, 3), (4, 5), dtype=torch.float64, device='cpu')\n assert t.shape == (4, 5, 2, 3)\n assert t.dtype == torch.float64\n assert t.device.type == 'cpu'\n\n\ndef test_struct_should_allow_to_create_nested_zeros_tensors():\n t = TensorStruct.zeros({'a': 5, 'b': (10,), 'c': (3, 14), 'd': {'e': 2,\n 'f': (3, 1, 4), 'g': {'h': {'i': (8, 2)}}}}, prefix_shape=(1,))\n td = t.data()\n assert td['a'].shape == (1, 5)\n assert td['b'].shape == (1, 10)\n assert td['c'].shape == (1, 3, 14)\n assert td['d']['e'].shape == (1, 2)\n assert td['d']['f'].shape == (1, 3, 1, 4)\n assert td['d']['g']['h']['i'].shape == (1, 8, 2)\n\n\ndef test_struct_tensors_should_return_list_of_tensors_in_struct():\n t = TensorStruct({'a': torch.ones(5), 'b': {'c': {'d': torch.ones(5) * 2}}}\n )\n ts = t.tensors()\n assert len(ts) == 2\n assert any([torch.all(torch.ones(5).eq(t_)) for t_ in ts])\n assert any([(torch.all(torch.ones(5).eq(t_)) * 2) for t_ in ts])\n\n\ndef test_struct_common_size_should_return_size_of_first_tensor_in_dict():\n t = TensorStruct({'a': torch.ones((10, 2)), 'b': {'c': torch.ones((5, 2))}}\n )\n assert t.common_size(0) in [10, 5]\n",
"<import token>\n\n\ndef test_struct_should_raise_if_constructed_from_invalid_data():\n with pytest.raises(AssertionError):\n _ = TensorStruct({'a': (1, 2)})\n\n\ndef test_struct_should_allow_to_create_single_zeros_tensor():\n t = TensorStruct.zeros((2, 3), (4, 5), dtype=torch.float64, device='cpu')\n assert t.shape == (4, 5, 2, 3)\n assert t.dtype == torch.float64\n assert t.device.type == 'cpu'\n\n\ndef test_struct_should_allow_to_create_nested_zeros_tensors():\n t = TensorStruct.zeros({'a': 5, 'b': (10,), 'c': (3, 14), 'd': {'e': 2,\n 'f': (3, 1, 4), 'g': {'h': {'i': (8, 2)}}}}, prefix_shape=(1,))\n td = t.data()\n assert td['a'].shape == (1, 5)\n assert td['b'].shape == (1, 10)\n assert td['c'].shape == (1, 3, 14)\n assert td['d']['e'].shape == (1, 2)\n assert td['d']['f'].shape == (1, 3, 1, 4)\n assert td['d']['g']['h']['i'].shape == (1, 8, 2)\n\n\ndef test_struct_tensors_should_return_list_of_tensors_in_struct():\n t = TensorStruct({'a': torch.ones(5), 'b': {'c': {'d': torch.ones(5) * 2}}}\n )\n ts = t.tensors()\n assert len(ts) == 2\n assert any([torch.all(torch.ones(5).eq(t_)) for t_ in ts])\n assert any([(torch.all(torch.ones(5).eq(t_)) * 2) for t_ in ts])\n\n\ndef test_struct_common_size_should_return_size_of_first_tensor_in_dict():\n t = TensorStruct({'a': torch.ones((10, 2)), 'b': {'c': torch.ones((5, 2))}}\n )\n assert t.common_size(0) in [10, 5]\n",
"<import token>\n\n\ndef test_struct_should_raise_if_constructed_from_invalid_data():\n with pytest.raises(AssertionError):\n _ = TensorStruct({'a': (1, 2)})\n\n\ndef test_struct_should_allow_to_create_single_zeros_tensor():\n t = TensorStruct.zeros((2, 3), (4, 5), dtype=torch.float64, device='cpu')\n assert t.shape == (4, 5, 2, 3)\n assert t.dtype == torch.float64\n assert t.device.type == 'cpu'\n\n\ndef test_struct_should_allow_to_create_nested_zeros_tensors():\n t = TensorStruct.zeros({'a': 5, 'b': (10,), 'c': (3, 14), 'd': {'e': 2,\n 'f': (3, 1, 4), 'g': {'h': {'i': (8, 2)}}}}, prefix_shape=(1,))\n td = t.data()\n assert td['a'].shape == (1, 5)\n assert td['b'].shape == (1, 10)\n assert td['c'].shape == (1, 3, 14)\n assert td['d']['e'].shape == (1, 2)\n assert td['d']['f'].shape == (1, 3, 1, 4)\n assert td['d']['g']['h']['i'].shape == (1, 8, 2)\n\n\ndef test_struct_tensors_should_return_list_of_tensors_in_struct():\n t = TensorStruct({'a': torch.ones(5), 'b': {'c': {'d': torch.ones(5) * 2}}}\n )\n ts = t.tensors()\n assert len(ts) == 2\n assert any([torch.all(torch.ones(5).eq(t_)) for t_ in ts])\n assert any([(torch.all(torch.ones(5).eq(t_)) * 2) for t_ in ts])\n\n\n<function token>\n",
"<import token>\n<function token>\n\n\ndef test_struct_should_allow_to_create_single_zeros_tensor():\n t = TensorStruct.zeros((2, 3), (4, 5), dtype=torch.float64, device='cpu')\n assert t.shape == (4, 5, 2, 3)\n assert t.dtype == torch.float64\n assert t.device.type == 'cpu'\n\n\ndef test_struct_should_allow_to_create_nested_zeros_tensors():\n t = TensorStruct.zeros({'a': 5, 'b': (10,), 'c': (3, 14), 'd': {'e': 2,\n 'f': (3, 1, 4), 'g': {'h': {'i': (8, 2)}}}}, prefix_shape=(1,))\n td = t.data()\n assert td['a'].shape == (1, 5)\n assert td['b'].shape == (1, 10)\n assert td['c'].shape == (1, 3, 14)\n assert td['d']['e'].shape == (1, 2)\n assert td['d']['f'].shape == (1, 3, 1, 4)\n assert td['d']['g']['h']['i'].shape == (1, 8, 2)\n\n\ndef test_struct_tensors_should_return_list_of_tensors_in_struct():\n t = TensorStruct({'a': torch.ones(5), 'b': {'c': {'d': torch.ones(5) * 2}}}\n )\n ts = t.tensors()\n assert len(ts) == 2\n assert any([torch.all(torch.ones(5).eq(t_)) for t_ in ts])\n assert any([(torch.all(torch.ones(5).eq(t_)) * 2) for t_ in ts])\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef test_struct_should_allow_to_create_nested_zeros_tensors():\n t = TensorStruct.zeros({'a': 5, 'b': (10,), 'c': (3, 14), 'd': {'e': 2,\n 'f': (3, 1, 4), 'g': {'h': {'i': (8, 2)}}}}, prefix_shape=(1,))\n td = t.data()\n assert td['a'].shape == (1, 5)\n assert td['b'].shape == (1, 10)\n assert td['c'].shape == (1, 3, 14)\n assert td['d']['e'].shape == (1, 2)\n assert td['d']['f'].shape == (1, 3, 1, 4)\n assert td['d']['g']['h']['i'].shape == (1, 8, 2)\n\n\ndef test_struct_tensors_should_return_list_of_tensors_in_struct():\n t = TensorStruct({'a': torch.ones(5), 'b': {'c': {'d': torch.ones(5) * 2}}}\n )\n ts = t.tensors()\n assert len(ts) == 2\n assert any([torch.all(torch.ones(5).eq(t_)) for t_ in ts])\n assert any([(torch.all(torch.ones(5).eq(t_)) * 2) for t_ in ts])\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_struct_tensors_should_return_list_of_tensors_in_struct():\n t = TensorStruct({'a': torch.ones(5), 'b': {'c': {'d': torch.ones(5) * 2}}}\n )\n ts = t.tensors()\n assert len(ts) == 2\n assert any([torch.all(torch.ones(5).eq(t_)) for t_ in ts])\n assert any([(torch.all(torch.ones(5).eq(t_)) * 2) for t_ in ts])\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,569 |
45c6fb4f5e873a42497044a8275c12e98c4dfb8e
|
'''
Most of the recent big-budget science fiction movies can also be classified as action movies. You are given a table of science fiction movies called scifi_movies and another table of action movies called action_movies. Your goal is to find which movies are considered only science fiction movies. Once you have this table, you can merge the movies table in to see the movie names. Since this exercise is related to science fiction movies, use a right join as your superhero power to solve this problem.
The movies, scifi_movies, and action_movies tables have been loaded for you.
'''
# Merge action_movies to the scifi_movies with right join
action_scifi = action_movies.merge(scifi_movies, on='movie_id', how='right',
suffixes=('_act','_sci'))
# From action_scifi, select only the rows where the genre_act column is null
scifi_only = action_scifi[action_scifi['genre_act'].isnull()]
# Merge the movies and scifi_only tables with an inner join
movies_and_scifi_only = movies.merge(scifi_only, left_on='id', right_on='movie_id')
# Print the first few rows and shape of movies_and_scifi_only
print(movies_and_scifi_only.head())
print(movies_and_scifi_only.shape)
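# --- Illustrative sketch (not part of the exercise; toy data invented here) ---
# The same right-join + isnull() anti-join on tiny frames: rows that exist only
# in the right table come back with NaN in the left table's columns, so
# filtering on a left-side column keeps the "right-only" rows.
import pandas as pd
action = pd.DataFrame({'movie_id': [1, 2], 'genre': ['Action', 'Action']})
scifi = pd.DataFrame({'movie_id': [2, 3], 'genre': ['Sci-Fi', 'Sci-Fi']})
demo = action.merge(scifi, on='movie_id', how='right', suffixes=('_act', '_sci'))
print(demo[demo['genre_act'].isnull()])  # only movie_id 3 remains: sci-fi only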
|
[
"'''\nMost of the recent big-budget science fiction movies can also be classified as action movies. You are given a table of science fiction movies called scifi_movies and another table of action movies called action_movies. Your goal is to find which movies are considered only science fiction movies. Once you have this table, you can merge the movies table in to see the movie names. Since this exercise is related to science fiction movies, use a right join as your superhero power to solve this problem.\n\nThe movies, scifi_movies, and action_movies tables have been loaded for you.\n'''\n# Merge action_movies to the scifi_movies with right join\naction_scifi = action_movies.merge(scifi_movies, on='movie_id', how='right',\n suffixes=('_act','_sci'))\n\n# From action_scifi, select only the rows where the genre_act column is null\nscifi_only = action_scifi[action_scifi['genre_act'].isnull()]\n\n# Merge the movies and scifi_only tables with an inner join\nmovies_and_scifi_only = movies.merge(scifi_only, left_on='id', right_on='movie_id')\n\n# Print the first few rows and shape of movies_and_scifi_only\nprint(movies_and_scifi_only.head())\nprint(movies_and_scifi_only.shape)",
"<docstring token>\naction_scifi = action_movies.merge(scifi_movies, on='movie_id', how='right',\n suffixes=('_act', '_sci'))\nscifi_only = action_scifi[action_scifi['genre_act'].isnull()]\nmovies_and_scifi_only = movies.merge(scifi_only, left_on='id', right_on=\n 'movie_id')\nprint(movies_and_scifi_only.head())\nprint(movies_and_scifi_only.shape)\n",
"<docstring token>\n<assignment token>\nprint(movies_and_scifi_only.head())\nprint(movies_and_scifi_only.shape)\n",
"<docstring token>\n<assignment token>\n<code token>\n"
] | false |
99,570 |
943edb3dd10a5af03e6b8716683498d0685b622e
|
"""
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numbers
import numpy as np
import scipy.sparse as sp
from cvxpy.interface import numpy_interface as np_intf
# A mapping of class to interface.
INTERFACES = {np.ndarray: np_intf.NDArrayInterface(),
np.matrix: np_intf.MatrixInterface(),
sp.csc_matrix: np_intf.SparseMatrixInterface(),
}
# Default Numpy interface.
DEFAULT_NP_INTF = INTERFACES[np.ndarray]
# Default dense and sparse matrix interfaces.
DEFAULT_INTF = INTERFACES[np.ndarray]
DEFAULT_SPARSE_INTF = INTERFACES[sp.csc_matrix]
# Returns the interface for interacting with the target matrix class.
def get_matrix_interface(target_class):
return INTERFACES[target_class]
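# Illustrative (not part of the original module): dispatch is by the concrete
# class of the value, e.g.
#     get_matrix_interface(np.ndarray)     # -> the NDArrayInterface instance
#     get_matrix_interface(sp.csc_matrix)  # -> the SparseMatrixInterface instance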
def get_cvxopt_dense_intf():
"""Dynamic import of CVXOPT dense interface.
"""
    import cvxpy.interface.cvxopt_interface.dense_matrix_interface as dmi
return dmi.DenseMatrixInterface()
def get_cvxopt_sparse_intf():
"""Dynamic import of CVXOPT sparse interface.
"""
import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi
return smi.SparseMatrixInterface()
# Tools for handling CVXOPT matrices.
def sparse2cvxopt(value):
"""Converts a SciPy sparse matrix to a CVXOPT sparse matrix.
Parameters
----------
sparse_mat : SciPy sparse matrix
The matrix to convert.
Returns
-------
CVXOPT spmatrix
The converted matrix.
"""
import cvxopt
if isinstance(value, (np.ndarray, np.matrix)):
return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')
# Convert scipy sparse matrices to coo form first.
elif sp.issparse(value):
value = value.tocoo()
return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),
value.col.tolist(), size=value.shape, tc='d')
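# Usage sketch (assumes CVXOPT is installed; not part of the original module):
#     sparse2cvxopt(np.eye(2))          # dense input -> 2x2 CVXOPT spmatrix, tc='d'
#     sparse2cvxopt(sp.eye(2).tocsr())  # any SciPy sparse format goes via the COO branch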
def dense2cvxopt(value):
"""Converts a NumPy matrix to a CVXOPT matrix.
Parameters
----------
value : NumPy matrix/ndarray
The matrix to convert.
Returns
-------
CVXOPT matrix
The converted matrix.
"""
import cvxopt
return cvxopt.matrix(value, tc='d')
def cvxopt2dense(value):
"""Converts a CVXOPT matrix to a NumPy ndarray.
Parameters
----------
value : CVXOPT matrix
The matrix to convert.
Returns
-------
NumPy ndarray
The converted matrix.
"""
return np.array(value)
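# Round-trip sketch (assumes CVXOPT is installed; illustrative only):
#     A = np.array([[1., 2.], [3., 4.]])
#     cvxopt2dense(dense2cvxopt(A))     # -> ndarray equal to A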
def is_sparse(constant) -> bool:
"""Is the constant a sparse matrix?
"""
return sp.issparse(constant)
# Get the dimensions of the constant.
def shape(constant):
if isinstance(constant, numbers.Number) or np.isscalar(constant):
return tuple()
elif isinstance(constant, list):
if len(constant) == 0:
return (0,)
elif isinstance(constant[0], numbers.Number): # Vector
return (len(constant),)
else: # Matrix
return (len(constant[0]), len(constant))
elif constant.__class__ in INTERFACES:
return INTERFACES[constant.__class__].shape(constant)
# Direct all sparse matrices to CSC interface.
elif is_sparse(constant):
return INTERFACES[sp.csc_matrix].shape(constant)
else:
raise TypeError("%s is not a valid type for a Constant value." % type(constant))
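# Illustrative examples of the shape conventions above (an addition, not
# upstream code): scalars give an empty tuple, flat lists are vectors, and
# nested lists are read as (len(constant[0]), len(constant)).
# >>> shape(5.0)
# ()
# >>> shape([1, 2, 3])
# (3,)
# >>> shape([[1, 2], [3, 4]])
# (2, 2)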
# Is the constant a column vector?
def is_vector(constant) -> bool:
return shape(constant)[1] == 1
# Is the constant a scalar?
def is_scalar(constant) -> bool:
return shape(constant) == (1, 1)
def from_2D_to_1D(constant):
"""Convert 2D Numpy matrices or arrays to 1D.
"""
if isinstance(constant, np.ndarray) and constant.ndim == 2:
return np.asarray(constant)[:, 0]
else:
return constant
def from_1D_to_2D(constant):
"""Convert 1D Numpy arrays to matrices.
"""
if isinstance(constant, np.ndarray) and constant.ndim == 1:
return np.mat(constant).T
else:
return constant
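# Illustrative sketch (an addition): the 1D/2D helpers reshape NumPy inputs
# and pass every other type through unchanged.
# >>> from_2D_to_1D(np.array([[1.0], [2.0]]))  # keeps the first column
# array([1., 2.])
# >>> from_1D_to_2D(np.array([1.0, 2.0])).shape  # column matrix via np.mat
# (2, 1)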
def convert(constant, sparse: bool = False, convert_scalars: bool = False):
"""Convert to appropriate type.
"""
if isinstance(constant, (list, np.matrix)):
return DEFAULT_INTF.const_to_matrix(constant,
convert_scalars=convert_scalars)
elif sparse:
return DEFAULT_SPARSE_INTF.const_to_matrix(constant,
convert_scalars=convert_scalars)
else:
return constant
# Get the value of the passed constant, interpreted as a scalar.
def scalar_value(constant):
if isinstance(constant, numbers.Number) or np.isscalar(constant):
return constant
elif isinstance(constant, list):
return constant[0]
elif constant.__class__ in INTERFACES:
return INTERFACES[constant.__class__].scalar_value(constant)
# Direct all sparse matrices to CSC interface.
elif is_sparse(constant):
return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())
else:
raise TypeError("%s is not a valid type for a Constant value." % type(constant))
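# Illustrative sketch (an addition): scalar extraction follows the same
# type dispatch as shape() above.
# >>> scalar_value(5)
# 5
# >>> scalar_value([3.0, 4.0])  # lists return their first entry
# 3.0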
# Return the collective sign of the matrix entries.
def sign(constant):
"""Return (is positive, is negative).
Parameters
----------
constant : numeric type
The numeric value to evaluate the sign of.
Returns
-------
tuple
The sign of the constant.
"""
if isinstance(constant, numbers.Number):
max_val = constant
min_val = constant
elif sp.issparse(constant):
max_val = constant.max()
min_val = constant.min()
else: # Convert to Numpy array.
mat = INTERFACES[np.ndarray].const_to_matrix(constant)
max_val = mat.max()
min_val = mat.min()
return (min_val >= 0, max_val <= 0)
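# Illustrative examples of the sign convention above (an addition): the
# returned pair is (all entries nonnegative, all entries nonpositive).
# >>> sign(3)
# (True, False)
# >>> sign(np.array([-1.0, 2.0]))  # mixed signs: neither flag holds
# (False, False)
# >>> sign(0)
# (True, True)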
def is_complex(constant, tol: float = 1e-5) -> tuple:
    """Return (has nonzero real part, has nonzero imaginary part).
    Parameters
    ----------
    constant : numeric type
        The numeric value to check for real and imaginary parts.
    tol : float, optional
        The smallest magnitude considered nonzero.
    Returns
    -------
    tuple
        Whether the real and imaginary parts of the constant are nonzero.
    """
complex_type = np.iscomplexobj(constant)
if not complex_type:
return True, False
if isinstance(constant, numbers.Number):
real_max = np.abs(np.real(constant))
imag_max = np.abs(np.imag(constant))
elif sp.issparse(constant):
real_max = np.abs(constant.real).max()
imag_max = np.abs(constant.imag).max()
else: # Convert to Numpy array.
constant = INTERFACES[np.ndarray].const_to_matrix(constant)
real_max = np.abs(constant.real).max()
imag_max = np.abs(constant.imag).max()
return (real_max >= tol, imag_max >= tol)
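# Illustrative sketch (an addition): is_complex reports which parts of the
# value are present; real dtypes short-circuit to (True, False).
# >>> is_complex(2.0)
# (True, False)
# >>> is_complex(1.0 + 2.0j)
# (True, True)
# >>> is_complex(3.0j)
# (False, True)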
# Get the value at the given index.
def index(constant, key):
if is_scalar(constant):
return constant
elif constant.__class__ in INTERFACES:
return INTERFACES[constant.__class__].index(constant, key)
# Use CSC interface for all sparse matrices.
elif is_sparse(constant):
interface = INTERFACES[sp.csc_matrix]
constant = interface.const_to_matrix(constant)
return interface.index(constant, key)
def is_hermitian(constant) -> tuple:
    """Check if a matrix is Hermitian and/or symmetric.
    Returns the tuple (is symmetric, is Hermitian).
    """
complex_type = np.iscomplexobj(constant)
if complex_type:
# TODO catch complex symmetric but not Hermitian?
is_symm = False
if sp.issparse(constant):
is_herm = is_sparse_symmetric(constant, complex=True)
else:
is_herm = np.allclose(constant, np.conj(constant.T))
else:
if sp.issparse(constant):
is_symm = is_sparse_symmetric(constant, complex=False)
else:
is_symm = np.allclose(constant, constant.T)
is_herm = is_symm
return is_symm, is_herm
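# Illustrative sketch (an addition): a real symmetric matrix is both
# symmetric and Hermitian, while a complex Hermitian matrix reports
# (False, True) because the complex branch above never sets is_symm.
# >>> is_hermitian(np.array([[1.0, 2.0], [2.0, 1.0]]))
# (True, True)
# >>> is_hermitian(np.array([[1.0, 1j], [-1j, 1.0]]))
# (False, True)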
def is_sparse_symmetric(m, complex: bool = False) -> bool:
"""Check if a sparse matrix is symmetric
Parameters
----------
m : array or sparse matrix
A square matrix.
Returns
-------
check : bool
The check result.
"""
# https://mail.scipy.org/pipermail/scipy-dev/2014-October/020101.html
if m.shape[0] != m.shape[1]:
raise ValueError('m must be a square matrix')
if not isinstance(m, sp.coo_matrix):
m = sp.coo_matrix(m)
r, c, v = m.row, m.col, m.data
tril_no_diag = r > c
triu_no_diag = c > r
if triu_no_diag.sum() != tril_no_diag.sum():
return False
rl = r[tril_no_diag]
cl = c[tril_no_diag]
vl = v[tril_no_diag]
ru = r[triu_no_diag]
cu = c[triu_no_diag]
vu = v[triu_no_diag]
sortl = np.lexsort((cl, rl))
sortu = np.lexsort((ru, cu))
vl = vl[sortl]
vu = vu[sortu]
if complex:
check = np.allclose(vl, np.conj(vu))
else:
check = np.allclose(vl, vu)
return check
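# Illustrative sketch (an addition): checking symmetry of small COO matrices.
# >>> S = sp.coo_matrix(np.array([[0.0, 1.0], [1.0, 0.0]]))
# >>> is_sparse_symmetric(S)
# True
# >>> is_sparse_symmetric(sp.coo_matrix(np.array([[0.0, 1.0], [2.0, 0.0]])))
# False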
|
[
"\"\"\"\nCopyright 2013 Steven Diamond\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport numbers\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom cvxpy.interface import numpy_interface as np_intf\n\n# A mapping of class to interface.\nINTERFACES = {np.ndarray: np_intf.NDArrayInterface(),\n np.matrix: np_intf.MatrixInterface(),\n sp.csc_matrix: np_intf.SparseMatrixInterface(),\n }\n# Default Numpy interface.\nDEFAULT_NP_INTF = INTERFACES[np.ndarray]\n# Default dense and sparse matrix interfaces.\nDEFAULT_INTF = INTERFACES[np.ndarray]\nDEFAULT_SPARSE_INTF = INTERFACES[sp.csc_matrix]\n\n\n# Returns the interface for interacting with the target matrix class.\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n# Tools for handling CVXOPT matrices.\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n # Convert scipy sparse matrices to coo form first.\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\ndef dense2cvxopt(value):\n \"\"\"Converts a NumPy matrix to a CVXOPT matrix.\n\n Parameters\n ----------\n value : NumPy matrix/ndarray\n The matrix to convert.\n\n Returns\n -------\n CVXOPT matrix\n The converted matrix.\n \"\"\"\n import cvxopt\n return cvxopt.matrix(value, tc='d')\n\n\ndef cvxopt2dense(value):\n \"\"\"Converts a CVXOPT matrix to a NumPy ndarray.\n\n Parameters\n ----------\n value : CVXOPT matrix\n The matrix to convert.\n\n Returns\n -------\n NumPy ndarray\n The converted matrix.\n \"\"\"\n return np.array(value)\n\n\ndef is_sparse(constant) -> bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n# Get the dimensions of the constant.\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return (0,)\n elif isinstance(constant[0], numbers.Number): # Vector\n return (len(constant),)\n else: # Matrix\n return (len(constant[0]), len(constant))\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n # Direct all sparse matrices to CSC interface.\n elif is_sparse(constant):\n return 
INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError(\"%s is not a valid type for a Constant value.\" % type(constant))\n\n# Is the constant a column vector?\n\n\ndef is_vector(constant) -> bool:\n return shape(constant)[1] == 1\n\n# Is the constant a scalar?\n\n\ndef is_scalar(constant) -> bool:\n return shape(constant) == (1, 1)\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\ndef from_1D_to_2D(constant):\n \"\"\"Convert 1D Numpy arrays to matrices.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 1:\n return np.mat(constant).T\n else:\n return constant\n\n\ndef convert(constant, sparse: bool = False, convert_scalars: bool = False):\n \"\"\"Convert to appropriate type.\n \"\"\"\n if isinstance(constant, (list, np.matrix)):\n return DEFAULT_INTF.const_to_matrix(constant,\n convert_scalars=convert_scalars)\n elif sparse:\n return DEFAULT_SPARSE_INTF.const_to_matrix(constant,\n convert_scalars=convert_scalars)\n else:\n return constant\n\n# Get the value of the passed constant, interpreted as a scalar.\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n # Direct all sparse matrices to CSC interface.\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError(\"%s is not a valid type for a Constant value.\" % type(constant))\n\n# Return the collective sign of the matrix entries.\n\n\ndef sign(constant):\n \"\"\"Return (is positive, is negative).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n if isinstance(constant, numbers.Number):\n max_val = constant\n min_val = constant\n elif sp.issparse(constant):\n max_val = constant.max()\n min_val = constant.min()\n else: # Convert to Numpy array.\n mat = INTERFACES[np.ndarray].const_to_matrix(constant)\n max_val = mat.max()\n min_val = mat.min()\n return (min_val >= 0, max_val <= 0)\n\n\ndef is_complex(constant, tol: float = 1e-5) -> bool:\n \"\"\"Return (is real, is imaginary).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n tol : float, optional\n The largest magnitude considered nonzero.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if not complex_type:\n return True, False\n if isinstance(constant, numbers.Number):\n real_max = np.abs(np.real(constant))\n imag_max = np.abs(np.imag(constant))\n elif sp.issparse(constant):\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n else: # Convert to Numpy array.\n constant = INTERFACES[np.ndarray].const_to_matrix(constant)\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n return (real_max >= tol, imag_max >= tol)\n\n# Get the value at the given index.\n\n\ndef index(constant, key):\n if is_scalar(constant):\n return constant\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].index(constant, key)\n # Use CSC interface for all sparse matrices.\n elif is_sparse(constant):\n interface = 
INTERFACES[sp.csc_matrix]\n constant = interface.const_to_matrix(constant)\n return interface.index(constant, key)\n\n\ndef is_hermitian(constant) -> bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n # TODO catch complex symmetric but not Hermitian?\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\ndef is_sparse_symmetric(m, complex: bool = False) -> bool:\n \"\"\"Check if a sparse matrix is symmetric\n\n Parameters\n ----------\n m : array or sparse matrix\n A square matrix.\n\n Returns\n -------\n check : bool\n The check result.\n\n \"\"\"\n # https://mail.scipy.org/pipermail/scipy-dev/2014-October/020101.html\n if m.shape[0] != m.shape[1]:\n raise ValueError('m must be a square matrix')\n\n if not isinstance(m, sp.coo_matrix):\n m = sp.coo_matrix(m)\n\n r, c, v = m.row, m.col, m.data\n tril_no_diag = r > c\n triu_no_diag = c > r\n\n if triu_no_diag.sum() != tril_no_diag.sum():\n return False\n\n rl = r[tril_no_diag]\n cl = c[tril_no_diag]\n vl = v[tril_no_diag]\n ru = r[triu_no_diag]\n cu = c[triu_no_diag]\n vu = v[triu_no_diag]\n\n sortl = np.lexsort((cl, rl))\n sortu = np.lexsort((ru, cu))\n vl = vl[sortl]\n vu = vu[sortu]\n\n if complex:\n check = np.allclose(vl, np.conj(vu))\n else:\n check = np.allclose(vl, vu)\n\n return check\n",
"<docstring token>\nimport numbers\nimport numpy as np\nimport scipy.sparse as sp\nfrom cvxpy.interface import numpy_interface as np_intf\nINTERFACES = {np.ndarray: np_intf.NDArrayInterface(), np.matrix: np_intf.\n MatrixInterface(), sp.csc_matrix: np_intf.SparseMatrixInterface()}\nDEFAULT_NP_INTF = INTERFACES[np.ndarray]\nDEFAULT_INTF = INTERFACES[np.ndarray]\nDEFAULT_SPARSE_INTF = INTERFACES[sp.csc_matrix]\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\ndef dense2cvxopt(value):\n \"\"\"Converts a NumPy matrix to a CVXOPT matrix.\n\n Parameters\n ----------\n value : NumPy matrix/ndarray\n The matrix to convert.\n\n Returns\n -------\n CVXOPT matrix\n The converted matrix.\n \"\"\"\n import cvxopt\n return cvxopt.matrix(value, tc='d')\n\n\ndef cvxopt2dense(value):\n \"\"\"Converts a CVXOPT matrix to a NumPy ndarray.\n\n Parameters\n ----------\n value : CVXOPT matrix\n The matrix to convert.\n\n Returns\n -------\n NumPy ndarray\n The converted matrix.\n \"\"\"\n return np.array(value)\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return 0,\n elif isinstance(constant[0], numbers.Number):\n return len(constant),\n else:\n return len(constant[0]), len(constant)\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError('%s is not a valid type for a Constant value.' 
%\n type(constant))\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\ndef is_scalar(constant) ->bool:\n return shape(constant) == (1, 1)\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\ndef from_1D_to_2D(constant):\n \"\"\"Convert 1D Numpy arrays to matrices.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 1:\n return np.mat(constant).T\n else:\n return constant\n\n\ndef convert(constant, sparse: bool=False, convert_scalars: bool=False):\n \"\"\"Convert to appropriate type.\n \"\"\"\n if isinstance(constant, (list, np.matrix)):\n return DEFAULT_INTF.const_to_matrix(constant, convert_scalars=\n convert_scalars)\n elif sparse:\n return DEFAULT_SPARSE_INTF.const_to_matrix(constant,\n convert_scalars=convert_scalars)\n else:\n return constant\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\ndef sign(constant):\n \"\"\"Return (is positive, is negative).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n if isinstance(constant, numbers.Number):\n max_val = constant\n min_val = constant\n elif sp.issparse(constant):\n max_val = constant.max()\n min_val = constant.min()\n else:\n mat = INTERFACES[np.ndarray].const_to_matrix(constant)\n max_val = mat.max()\n min_val = mat.min()\n return min_val >= 0, max_val <= 0\n\n\ndef is_complex(constant, tol: float=1e-05) ->bool:\n \"\"\"Return (is real, is imaginary).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n tol : float, optional\n The largest magnitude considered nonzero.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if not complex_type:\n return True, False\n if isinstance(constant, numbers.Number):\n real_max = np.abs(np.real(constant))\n imag_max = np.abs(np.imag(constant))\n elif sp.issparse(constant):\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n else:\n constant = INTERFACES[np.ndarray].const_to_matrix(constant)\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n return real_max >= tol, imag_max >= tol\n\n\ndef index(constant, key):\n if is_scalar(constant):\n return constant\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].index(constant, key)\n elif is_sparse(constant):\n interface = INTERFACES[sp.csc_matrix]\n constant = interface.const_to_matrix(constant)\n return interface.index(constant, key)\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = 
is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\ndef is_sparse_symmetric(m, complex: bool=False) ->bool:\n \"\"\"Check if a sparse matrix is symmetric\n\n Parameters\n ----------\n m : array or sparse matrix\n A square matrix.\n\n Returns\n -------\n check : bool\n The check result.\n\n \"\"\"\n if m.shape[0] != m.shape[1]:\n raise ValueError('m must be a square matrix')\n if not isinstance(m, sp.coo_matrix):\n m = sp.coo_matrix(m)\n r, c, v = m.row, m.col, m.data\n tril_no_diag = r > c\n triu_no_diag = c > r\n if triu_no_diag.sum() != tril_no_diag.sum():\n return False\n rl = r[tril_no_diag]\n cl = c[tril_no_diag]\n vl = v[tril_no_diag]\n ru = r[triu_no_diag]\n cu = c[triu_no_diag]\n vu = v[triu_no_diag]\n sortl = np.lexsort((cl, rl))\n sortu = np.lexsort((ru, cu))\n vl = vl[sortl]\n vu = vu[sortu]\n if complex:\n check = np.allclose(vl, np.conj(vu))\n else:\n check = np.allclose(vl, vu)\n return check\n",
"<docstring token>\n<import token>\nINTERFACES = {np.ndarray: np_intf.NDArrayInterface(), np.matrix: np_intf.\n MatrixInterface(), sp.csc_matrix: np_intf.SparseMatrixInterface()}\nDEFAULT_NP_INTF = INTERFACES[np.ndarray]\nDEFAULT_INTF = INTERFACES[np.ndarray]\nDEFAULT_SPARSE_INTF = INTERFACES[sp.csc_matrix]\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\ndef dense2cvxopt(value):\n \"\"\"Converts a NumPy matrix to a CVXOPT matrix.\n\n Parameters\n ----------\n value : NumPy matrix/ndarray\n The matrix to convert.\n\n Returns\n -------\n CVXOPT matrix\n The converted matrix.\n \"\"\"\n import cvxopt\n return cvxopt.matrix(value, tc='d')\n\n\ndef cvxopt2dense(value):\n \"\"\"Converts a CVXOPT matrix to a NumPy ndarray.\n\n Parameters\n ----------\n value : CVXOPT matrix\n The matrix to convert.\n\n Returns\n -------\n NumPy ndarray\n The converted matrix.\n \"\"\"\n return np.array(value)\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return 0,\n elif isinstance(constant[0], numbers.Number):\n return len(constant),\n else:\n return len(constant[0]), len(constant)\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError('%s is not a valid type for a Constant value.' 
%\n type(constant))\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\ndef is_scalar(constant) ->bool:\n return shape(constant) == (1, 1)\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\ndef from_1D_to_2D(constant):\n \"\"\"Convert 1D Numpy arrays to matrices.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 1:\n return np.mat(constant).T\n else:\n return constant\n\n\ndef convert(constant, sparse: bool=False, convert_scalars: bool=False):\n \"\"\"Convert to appropriate type.\n \"\"\"\n if isinstance(constant, (list, np.matrix)):\n return DEFAULT_INTF.const_to_matrix(constant, convert_scalars=\n convert_scalars)\n elif sparse:\n return DEFAULT_SPARSE_INTF.const_to_matrix(constant,\n convert_scalars=convert_scalars)\n else:\n return constant\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\ndef sign(constant):\n \"\"\"Return (is positive, is negative).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n if isinstance(constant, numbers.Number):\n max_val = constant\n min_val = constant\n elif sp.issparse(constant):\n max_val = constant.max()\n min_val = constant.min()\n else:\n mat = INTERFACES[np.ndarray].const_to_matrix(constant)\n max_val = mat.max()\n min_val = mat.min()\n return min_val >= 0, max_val <= 0\n\n\ndef is_complex(constant, tol: float=1e-05) ->bool:\n \"\"\"Return (is real, is imaginary).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n tol : float, optional\n The largest magnitude considered nonzero.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if not complex_type:\n return True, False\n if isinstance(constant, numbers.Number):\n real_max = np.abs(np.real(constant))\n imag_max = np.abs(np.imag(constant))\n elif sp.issparse(constant):\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n else:\n constant = INTERFACES[np.ndarray].const_to_matrix(constant)\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n return real_max >= tol, imag_max >= tol\n\n\ndef index(constant, key):\n if is_scalar(constant):\n return constant\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].index(constant, key)\n elif is_sparse(constant):\n interface = INTERFACES[sp.csc_matrix]\n constant = interface.const_to_matrix(constant)\n return interface.index(constant, key)\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = 
is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\ndef is_sparse_symmetric(m, complex: bool=False) ->bool:\n \"\"\"Check if a sparse matrix is symmetric\n\n Parameters\n ----------\n m : array or sparse matrix\n A square matrix.\n\n Returns\n -------\n check : bool\n The check result.\n\n \"\"\"\n if m.shape[0] != m.shape[1]:\n raise ValueError('m must be a square matrix')\n if not isinstance(m, sp.coo_matrix):\n m = sp.coo_matrix(m)\n r, c, v = m.row, m.col, m.data\n tril_no_diag = r > c\n triu_no_diag = c > r\n if triu_no_diag.sum() != tril_no_diag.sum():\n return False\n rl = r[tril_no_diag]\n cl = c[tril_no_diag]\n vl = v[tril_no_diag]\n ru = r[triu_no_diag]\n cu = c[triu_no_diag]\n vu = v[triu_no_diag]\n sortl = np.lexsort((cl, rl))\n sortu = np.lexsort((ru, cu))\n vl = vl[sortl]\n vu = vu[sortu]\n if complex:\n check = np.allclose(vl, np.conj(vu))\n else:\n check = np.allclose(vl, vu)\n return check\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\ndef dense2cvxopt(value):\n \"\"\"Converts a NumPy matrix to a CVXOPT matrix.\n\n Parameters\n ----------\n value : NumPy matrix/ndarray\n The matrix to convert.\n\n Returns\n -------\n CVXOPT matrix\n The converted matrix.\n \"\"\"\n import cvxopt\n return cvxopt.matrix(value, tc='d')\n\n\ndef cvxopt2dense(value):\n \"\"\"Converts a CVXOPT matrix to a NumPy ndarray.\n\n Parameters\n ----------\n value : CVXOPT matrix\n The matrix to convert.\n\n Returns\n -------\n NumPy ndarray\n The converted matrix.\n \"\"\"\n return np.array(value)\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return 0,\n elif isinstance(constant[0], numbers.Number):\n return len(constant),\n else:\n return len(constant[0]), len(constant)\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError('%s is not a valid type for a Constant value.' 
%\n type(constant))\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\ndef is_scalar(constant) ->bool:\n return shape(constant) == (1, 1)\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\ndef from_1D_to_2D(constant):\n \"\"\"Convert 1D Numpy arrays to matrices.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 1:\n return np.mat(constant).T\n else:\n return constant\n\n\ndef convert(constant, sparse: bool=False, convert_scalars: bool=False):\n \"\"\"Convert to appropriate type.\n \"\"\"\n if isinstance(constant, (list, np.matrix)):\n return DEFAULT_INTF.const_to_matrix(constant, convert_scalars=\n convert_scalars)\n elif sparse:\n return DEFAULT_SPARSE_INTF.const_to_matrix(constant,\n convert_scalars=convert_scalars)\n else:\n return constant\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\ndef sign(constant):\n \"\"\"Return (is positive, is negative).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n if isinstance(constant, numbers.Number):\n max_val = constant\n min_val = constant\n elif sp.issparse(constant):\n max_val = constant.max()\n min_val = constant.min()\n else:\n mat = INTERFACES[np.ndarray].const_to_matrix(constant)\n max_val = mat.max()\n min_val = mat.min()\n return min_val >= 0, max_val <= 0\n\n\ndef is_complex(constant, tol: float=1e-05) ->bool:\n \"\"\"Return (is real, is imaginary).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n tol : float, optional\n The largest magnitude considered nonzero.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if not complex_type:\n return True, False\n if isinstance(constant, numbers.Number):\n real_max = np.abs(np.real(constant))\n imag_max = np.abs(np.imag(constant))\n elif sp.issparse(constant):\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n else:\n constant = INTERFACES[np.ndarray].const_to_matrix(constant)\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n return real_max >= tol, imag_max >= tol\n\n\ndef index(constant, key):\n if is_scalar(constant):\n return constant\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].index(constant, key)\n elif is_sparse(constant):\n interface = INTERFACES[sp.csc_matrix]\n constant = interface.const_to_matrix(constant)\n return interface.index(constant, key)\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = 
is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\ndef is_sparse_symmetric(m, complex: bool=False) ->bool:\n \"\"\"Check if a sparse matrix is symmetric\n\n Parameters\n ----------\n m : array or sparse matrix\n A square matrix.\n\n Returns\n -------\n check : bool\n The check result.\n\n \"\"\"\n if m.shape[0] != m.shape[1]:\n raise ValueError('m must be a square matrix')\n if not isinstance(m, sp.coo_matrix):\n m = sp.coo_matrix(m)\n r, c, v = m.row, m.col, m.data\n tril_no_diag = r > c\n triu_no_diag = c > r\n if triu_no_diag.sum() != tril_no_diag.sum():\n return False\n rl = r[tril_no_diag]\n cl = c[tril_no_diag]\n vl = v[tril_no_diag]\n ru = r[triu_no_diag]\n cu = c[triu_no_diag]\n vu = v[triu_no_diag]\n sortl = np.lexsort((cl, rl))\n sortu = np.lexsort((ru, cu))\n vl = vl[sortl]\n vu = vu[sortu]\n if complex:\n check = np.allclose(vl, np.conj(vu))\n else:\n check = np.allclose(vl, vu)\n return check\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\ndef dense2cvxopt(value):\n \"\"\"Converts a NumPy matrix to a CVXOPT matrix.\n\n Parameters\n ----------\n value : NumPy matrix/ndarray\n The matrix to convert.\n\n Returns\n -------\n CVXOPT matrix\n The converted matrix.\n \"\"\"\n import cvxopt\n return cvxopt.matrix(value, tc='d')\n\n\ndef cvxopt2dense(value):\n \"\"\"Converts a CVXOPT matrix to a NumPy ndarray.\n\n Parameters\n ----------\n value : CVXOPT matrix\n The matrix to convert.\n\n Returns\n -------\n NumPy ndarray\n The converted matrix.\n \"\"\"\n return np.array(value)\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return 0,\n elif isinstance(constant[0], numbers.Number):\n return len(constant),\n else:\n return len(constant[0]), len(constant)\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError('%s is not a valid type for a Constant value.' 
%\n type(constant))\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\ndef is_scalar(constant) ->bool:\n return shape(constant) == (1, 1)\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\ndef from_1D_to_2D(constant):\n \"\"\"Convert 1D Numpy arrays to matrices.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 1:\n return np.mat(constant).T\n else:\n return constant\n\n\ndef convert(constant, sparse: bool=False, convert_scalars: bool=False):\n \"\"\"Convert to appropriate type.\n \"\"\"\n if isinstance(constant, (list, np.matrix)):\n return DEFAULT_INTF.const_to_matrix(constant, convert_scalars=\n convert_scalars)\n elif sparse:\n return DEFAULT_SPARSE_INTF.const_to_matrix(constant,\n convert_scalars=convert_scalars)\n else:\n return constant\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\ndef sign(constant):\n \"\"\"Return (is positive, is negative).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n if isinstance(constant, numbers.Number):\n max_val = constant\n min_val = constant\n elif sp.issparse(constant):\n max_val = constant.max()\n min_val = constant.min()\n else:\n mat = INTERFACES[np.ndarray].const_to_matrix(constant)\n max_val = mat.max()\n min_val = mat.min()\n return min_val >= 0, max_val <= 0\n\n\ndef is_complex(constant, tol: float=1e-05) ->bool:\n \"\"\"Return (is real, is imaginary).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n tol : float, optional\n The largest magnitude considered nonzero.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if not complex_type:\n return True, False\n if isinstance(constant, numbers.Number):\n real_max = np.abs(np.real(constant))\n imag_max = np.abs(np.imag(constant))\n elif sp.issparse(constant):\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n else:\n constant = INTERFACES[np.ndarray].const_to_matrix(constant)\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n return real_max >= tol, imag_max >= tol\n\n\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\ndef is_sparse_symmetric(m, complex: bool=False) ->bool:\n \"\"\"Check if a sparse matrix is symmetric\n\n Parameters\n ----------\n m : array or sparse matrix\n A square 
matrix.\n\n Returns\n -------\n check : bool\n The check result.\n\n \"\"\"\n if m.shape[0] != m.shape[1]:\n raise ValueError('m must be a square matrix')\n if not isinstance(m, sp.coo_matrix):\n m = sp.coo_matrix(m)\n r, c, v = m.row, m.col, m.data\n tril_no_diag = r > c\n triu_no_diag = c > r\n if triu_no_diag.sum() != tril_no_diag.sum():\n return False\n rl = r[tril_no_diag]\n cl = c[tril_no_diag]\n vl = v[tril_no_diag]\n ru = r[triu_no_diag]\n cu = c[triu_no_diag]\n vu = v[triu_no_diag]\n sortl = np.lexsort((cl, rl))\n sortu = np.lexsort((ru, cu))\n vl = vl[sortl]\n vu = vu[sortu]\n if complex:\n check = np.allclose(vl, np.conj(vu))\n else:\n check = np.allclose(vl, vu)\n return check\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\ndef dense2cvxopt(value):\n \"\"\"Converts a NumPy matrix to a CVXOPT matrix.\n\n Parameters\n ----------\n value : NumPy matrix/ndarray\n The matrix to convert.\n\n Returns\n -------\n CVXOPT matrix\n The converted matrix.\n \"\"\"\n import cvxopt\n return cvxopt.matrix(value, tc='d')\n\n\n<function token>\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return 0,\n elif isinstance(constant[0], numbers.Number):\n return len(constant),\n else:\n return len(constant[0]), len(constant)\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\ndef is_scalar(constant) ->bool:\n return shape(constant) == (1, 1)\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\ndef from_1D_to_2D(constant):\n \"\"\"Convert 1D Numpy arrays to matrices.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 1:\n return np.mat(constant).T\n else:\n return constant\n\n\ndef convert(constant, sparse: bool=False, convert_scalars: bool=False):\n \"\"\"Convert to appropriate type.\n \"\"\"\n if isinstance(constant, (list, np.matrix)):\n return DEFAULT_INTF.const_to_matrix(constant, convert_scalars=\n convert_scalars)\n elif sparse:\n return DEFAULT_SPARSE_INTF.const_to_matrix(constant,\n convert_scalars=convert_scalars)\n else:\n return constant\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' 
%\n type(constant))\n\n\ndef sign(constant):\n \"\"\"Return (is positive, is negative).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n if isinstance(constant, numbers.Number):\n max_val = constant\n min_val = constant\n elif sp.issparse(constant):\n max_val = constant.max()\n min_val = constant.min()\n else:\n mat = INTERFACES[np.ndarray].const_to_matrix(constant)\n max_val = mat.max()\n min_val = mat.min()\n return min_val >= 0, max_val <= 0\n\n\ndef is_complex(constant, tol: float=1e-05) ->bool:\n \"\"\"Return (is real, is imaginary).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n tol : float, optional\n The largest magnitude considered nonzero.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if not complex_type:\n return True, False\n if isinstance(constant, numbers.Number):\n real_max = np.abs(np.real(constant))\n imag_max = np.abs(np.imag(constant))\n elif sp.issparse(constant):\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n else:\n constant = INTERFACES[np.ndarray].const_to_matrix(constant)\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n return real_max >= tol, imag_max >= tol\n\n\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\ndef is_sparse_symmetric(m, complex: bool=False) ->bool:\n \"\"\"Check if a sparse matrix is symmetric\n\n Parameters\n ----------\n m : array or sparse matrix\n A square matrix.\n\n Returns\n -------\n check : bool\n The check result.\n\n \"\"\"\n if m.shape[0] != m.shape[1]:\n raise ValueError('m must be a square matrix')\n if not isinstance(m, sp.coo_matrix):\n m = sp.coo_matrix(m)\n r, c, v = m.row, m.col, m.data\n tril_no_diag = r > c\n triu_no_diag = c > r\n if triu_no_diag.sum() != tril_no_diag.sum():\n return False\n rl = r[tril_no_diag]\n cl = c[tril_no_diag]\n vl = v[tril_no_diag]\n ru = r[triu_no_diag]\n cu = c[triu_no_diag]\n vu = v[triu_no_diag]\n sortl = np.lexsort((cl, rl))\n sortu = np.lexsort((ru, cu))\n vl = vl[sortl]\n vu = vu[sortu]\n if complex:\n check = np.allclose(vl, np.conj(vu))\n else:\n check = np.allclose(vl, vu)\n return check\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\ndef dense2cvxopt(value):\n \"\"\"Converts a NumPy matrix to a CVXOPT matrix.\n\n Parameters\n ----------\n value : NumPy matrix/ndarray\n The matrix to convert.\n\n Returns\n -------\n CVXOPT matrix\n The converted matrix.\n \"\"\"\n import cvxopt\n return cvxopt.matrix(value, tc='d')\n\n\n<function token>\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return 0,\n elif isinstance(constant[0], numbers.Number):\n return len(constant),\n else:\n return len(constant[0]), len(constant)\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\ndef is_scalar(constant) ->bool:\n return shape(constant) == (1, 1)\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\n<function token>\n\n\ndef convert(constant, sparse: bool=False, convert_scalars: bool=False):\n \"\"\"Convert to appropriate type.\n \"\"\"\n if isinstance(constant, (list, np.matrix)):\n return DEFAULT_INTF.const_to_matrix(constant, convert_scalars=\n convert_scalars)\n elif sparse:\n return DEFAULT_SPARSE_INTF.const_to_matrix(constant,\n convert_scalars=convert_scalars)\n else:\n return constant\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' 
%\n type(constant))\n\n\ndef sign(constant):\n \"\"\"Return (is positive, is negative).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n if isinstance(constant, numbers.Number):\n max_val = constant\n min_val = constant\n elif sp.issparse(constant):\n max_val = constant.max()\n min_val = constant.min()\n else:\n mat = INTERFACES[np.ndarray].const_to_matrix(constant)\n max_val = mat.max()\n min_val = mat.min()\n return min_val >= 0, max_val <= 0\n\n\ndef is_complex(constant, tol: float=1e-05) ->bool:\n \"\"\"Return (is real, is imaginary).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n tol : float, optional\n The largest magnitude considered nonzero.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if not complex_type:\n return True, False\n if isinstance(constant, numbers.Number):\n real_max = np.abs(np.real(constant))\n imag_max = np.abs(np.imag(constant))\n elif sp.issparse(constant):\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n else:\n constant = INTERFACES[np.ndarray].const_to_matrix(constant)\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n return real_max >= tol, imag_max >= tol\n\n\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\ndef is_sparse_symmetric(m, complex: bool=False) ->bool:\n \"\"\"Check if a sparse matrix is symmetric\n\n Parameters\n ----------\n m : array or sparse matrix\n A square matrix.\n\n Returns\n -------\n check : bool\n The check result.\n\n \"\"\"\n if m.shape[0] != m.shape[1]:\n raise ValueError('m must be a square matrix')\n if not isinstance(m, sp.coo_matrix):\n m = sp.coo_matrix(m)\n r, c, v = m.row, m.col, m.data\n tril_no_diag = r > c\n triu_no_diag = c > r\n if triu_no_diag.sum() != tril_no_diag.sum():\n return False\n rl = r[tril_no_diag]\n cl = c[tril_no_diag]\n vl = v[tril_no_diag]\n ru = r[triu_no_diag]\n cu = c[triu_no_diag]\n vu = v[triu_no_diag]\n sortl = np.lexsort((cl, rl))\n sortu = np.lexsort((ru, cu))\n vl = vl[sortl]\n vu = vu[sortu]\n if complex:\n check = np.allclose(vl, np.conj(vu))\n else:\n check = np.allclose(vl, vu)\n return check\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return 0,\n elif isinstance(constant[0], numbers.Number):\n return len(constant),\n else:\n return len(constant[0]), len(constant)\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\ndef is_scalar(constant) ->bool:\n return shape(constant) == (1, 1)\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\n<function token>\n\n\ndef convert(constant, sparse: bool=False, convert_scalars: bool=False):\n \"\"\"Convert to appropriate type.\n \"\"\"\n if isinstance(constant, (list, np.matrix)):\n return DEFAULT_INTF.const_to_matrix(constant, convert_scalars=\n convert_scalars)\n elif sparse:\n return DEFAULT_SPARSE_INTF.const_to_matrix(constant,\n convert_scalars=convert_scalars)\n else:\n return constant\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' 
%\n type(constant))\n\n\ndef sign(constant):\n \"\"\"Return (is positive, is negative).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n if isinstance(constant, numbers.Number):\n max_val = constant\n min_val = constant\n elif sp.issparse(constant):\n max_val = constant.max()\n min_val = constant.min()\n else:\n mat = INTERFACES[np.ndarray].const_to_matrix(constant)\n max_val = mat.max()\n min_val = mat.min()\n return min_val >= 0, max_val <= 0\n\n\ndef is_complex(constant, tol: float=1e-05) ->bool:\n \"\"\"Return (is real, is imaginary).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n tol : float, optional\n The largest magnitude considered nonzero.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if not complex_type:\n return True, False\n if isinstance(constant, numbers.Number):\n real_max = np.abs(np.real(constant))\n imag_max = np.abs(np.imag(constant))\n elif sp.issparse(constant):\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n else:\n constant = INTERFACES[np.ndarray].const_to_matrix(constant)\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n return real_max >= tol, imag_max >= tol\n\n\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\ndef is_sparse_symmetric(m, complex: bool=False) ->bool:\n \"\"\"Check if a sparse matrix is symmetric\n\n Parameters\n ----------\n m : array or sparse matrix\n A square matrix.\n\n Returns\n -------\n check : bool\n The check result.\n\n \"\"\"\n if m.shape[0] != m.shape[1]:\n raise ValueError('m must be a square matrix')\n if not isinstance(m, sp.coo_matrix):\n m = sp.coo_matrix(m)\n r, c, v = m.row, m.col, m.data\n tril_no_diag = r > c\n triu_no_diag = c > r\n if triu_no_diag.sum() != tril_no_diag.sum():\n return False\n rl = r[tril_no_diag]\n cl = c[tril_no_diag]\n vl = v[tril_no_diag]\n ru = r[triu_no_diag]\n cu = c[triu_no_diag]\n vu = v[triu_no_diag]\n sortl = np.lexsort((cl, rl))\n sortu = np.lexsort((ru, cu))\n vl = vl[sortl]\n vu = vu[sortu]\n if complex:\n check = np.allclose(vl, np.conj(vu))\n else:\n check = np.allclose(vl, vu)\n return check\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return 0,\n elif isinstance(constant[0], numbers.Number):\n return len(constant),\n else:\n return len(constant[0]), len(constant)\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\ndef is_scalar(constant) ->bool:\n return shape(constant) == (1, 1)\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\n<function token>\n\n\ndef convert(constant, sparse: bool=False, convert_scalars: bool=False):\n \"\"\"Convert to appropriate type.\n \"\"\"\n if isinstance(constant, (list, np.matrix)):\n return DEFAULT_INTF.const_to_matrix(constant, convert_scalars=\n convert_scalars)\n elif sparse:\n return DEFAULT_SPARSE_INTF.const_to_matrix(constant,\n convert_scalars=convert_scalars)\n else:\n return constant\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' 
%\n type(constant))\n\n\ndef sign(constant):\n \"\"\"Return (is positive, is negative).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n if isinstance(constant, numbers.Number):\n max_val = constant\n min_val = constant\n elif sp.issparse(constant):\n max_val = constant.max()\n min_val = constant.min()\n else:\n mat = INTERFACES[np.ndarray].const_to_matrix(constant)\n max_val = mat.max()\n min_val = mat.min()\n return min_val >= 0, max_val <= 0\n\n\ndef is_complex(constant, tol: float=1e-05) ->bool:\n \"\"\"Return (is real, is imaginary).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n tol : float, optional\n The largest magnitude considered nonzero.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if not complex_type:\n return True, False\n if isinstance(constant, numbers.Number):\n real_max = np.abs(np.real(constant))\n imag_max = np.abs(np.imag(constant))\n elif sp.issparse(constant):\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n else:\n constant = INTERFACES[np.ndarray].const_to_matrix(constant)\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n return real_max >= tol, imag_max >= tol\n\n\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return 0,\n elif isinstance(constant[0], numbers.Number):\n return len(constant),\n else:\n return len(constant[0]), len(constant)\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\ndef is_scalar(constant) ->bool:\n return shape(constant) == (1, 1)\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\n<function token>\n\n\ndef convert(constant, sparse: bool=False, convert_scalars: bool=False):\n \"\"\"Convert to appropriate type.\n \"\"\"\n if isinstance(constant, (list, np.matrix)):\n return DEFAULT_INTF.const_to_matrix(constant, convert_scalars=\n convert_scalars)\n elif sparse:\n return DEFAULT_SPARSE_INTF.const_to_matrix(constant,\n convert_scalars=convert_scalars)\n else:\n return constant\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' 
%\n type(constant))\n\n\n<function token>\n\n\ndef is_complex(constant, tol: float=1e-05) ->bool:\n \"\"\"Return (is real, is imaginary).\n\n Parameters\n ----------\n constant : numeric type\n The numeric value to evaluate the sign of.\n tol : float, optional\n The largest magnitude considered nonzero.\n\n Returns\n -------\n tuple\n The sign of the constant.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if not complex_type:\n return True, False\n if isinstance(constant, numbers.Number):\n real_max = np.abs(np.real(constant))\n imag_max = np.abs(np.imag(constant))\n elif sp.issparse(constant):\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n else:\n constant = INTERFACES[np.ndarray].const_to_matrix(constant)\n real_max = np.abs(constant.real).max()\n imag_max = np.abs(constant.imag).max()\n return real_max >= tol, imag_max >= tol\n\n\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return 0,\n elif isinstance(constant[0], numbers.Number):\n return len(constant),\n else:\n return len(constant[0]), len(constant)\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\ndef is_scalar(constant) ->bool:\n return shape(constant) == (1, 1)\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\n<function token>\n\n\ndef convert(constant, sparse: bool=False, convert_scalars: bool=False):\n \"\"\"Convert to appropriate type.\n \"\"\"\n if isinstance(constant, (list, np.matrix)):\n return DEFAULT_INTF.const_to_matrix(constant, convert_scalars=\n convert_scalars)\n elif sparse:\n return DEFAULT_SPARSE_INTF.const_to_matrix(constant,\n convert_scalars=convert_scalars)\n else:\n return constant\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' 
%\n type(constant))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return 0,\n elif isinstance(constant[0], numbers.Number):\n return len(constant),\n else:\n return len(constant[0]), len(constant)\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\ndef is_scalar(constant) ->bool:\n return shape(constant) == (1, 1)\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\n<function token>\n<function token>\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\ndef shape(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return tuple()\n elif isinstance(constant, list):\n if len(constant) == 0:\n return 0,\n elif isinstance(constant[0], numbers.Number):\n return len(constant),\n else:\n return len(constant[0]), len(constant)\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].shape(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].shape(constant)\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\n<function token>\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\n<function token>\n<function token>\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n\n\ndef is_sparse(constant) ->bool:\n \"\"\"Is the constant a sparse matrix?\n \"\"\"\n return sp.issparse(constant)\n\n\n<function token>\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\n<function token>\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\n<function token>\n<function token>\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\ndef get_cvxopt_dense_intf():\n \"\"\"Dynamic import of CVXOPT dense interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\n<function token>\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\n<function token>\n<function token>\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\n<function token>\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\n<function token>\n\n\ndef from_2D_to_1D(constant):\n \"\"\"Convert 2D Numpy matrices or arrays to 1D.\n \"\"\"\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant\n\n\n<function token>\n<function token>\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef get_matrix_interface(target_class):\n return INTERFACES[target_class]\n\n\n<function token>\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef get_cvxopt_sparse_intf():\n \"\"\"Dynamic import of CVXOPT sparse interface.\n \"\"\"\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef scalar_value(constant):\n if isinstance(constant, numbers.Number) or np.isscalar(constant):\n return constant\n elif isinstance(constant, list):\n return constant[0]\n elif constant.__class__ in INTERFACES:\n return INTERFACES[constant.__class__].scalar_value(constant)\n elif is_sparse(constant):\n return INTERFACES[sp.csc_matrix].scalar_value(constant.tocsc())\n else:\n raise TypeError('%s is not a valid type for a Constant value.' %\n type(constant))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_vector(constant) ->bool:\n return shape(constant)[1] == 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef is_hermitian(constant) ->bool:\n \"\"\"Check if a matrix is Hermitian and/or symmetric.\n \"\"\"\n complex_type = np.iscomplexobj(constant)\n if complex_type:\n is_symm = False\n if sp.issparse(constant):\n is_herm = is_sparse_symmetric(constant, complex=True)\n else:\n is_herm = np.allclose(constant, np.conj(constant.T))\n else:\n if sp.issparse(constant):\n is_symm = is_sparse_symmetric(constant, complex=False)\n else:\n is_symm = np.allclose(constant, constant.T)\n is_herm = is_symm\n return is_symm, is_herm\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef sparse2cvxopt(value):\n \"\"\"Converts a SciPy sparse matrix to a CVXOPT sparse matrix.\n\n Parameters\n ----------\n sparse_mat : SciPy sparse matrix\n The matrix to convert.\n\n Returns\n -------\n CVXOPT spmatrix\n The converted matrix.\n \"\"\"\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,571 |
f15ea2e50ab65b574465167b77c67d2b5cf8a81b
|
class Solution:
def xorOperation(self, n: int, start: int) -> int:
res = 0
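        # XOR together the n terms start, start + 2, ..., start + 2*(n - 1)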
for i in range(n):
res ^= start + 2*i
return res
s = Solution()
n = 10
start = 5
print(s.xorOperation(n, start))
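# prints 2 (5 ^ 7 ^ 9 ^ ... ^ 23)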
|
[
"class Solution:\n def xorOperation(self, n: int, start: int) -> int:\n res = 0\n for i in range(n):\n res ^= start + 2*i\n return res\ns = Solution()\nn = 10\nstart = 5\nprint(s.xorOperation(n,start))",
"class Solution:\n\n def xorOperation(self, n: int, start: int) ->int:\n res = 0\n for i in range(n):\n res ^= start + 2 * i\n return res\n\n\ns = Solution()\nn = 10\nstart = 5\nprint(s.xorOperation(n, start))\n",
"class Solution:\n\n def xorOperation(self, n: int, start: int) ->int:\n res = 0\n for i in range(n):\n res ^= start + 2 * i\n return res\n\n\n<assignment token>\nprint(s.xorOperation(n, start))\n",
"class Solution:\n\n def xorOperation(self, n: int, start: int) ->int:\n res = 0\n for i in range(n):\n res ^= start + 2 * i\n return res\n\n\n<assignment token>\n<code token>\n",
"class Solution:\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<class token>\n<assignment token>\n<code token>\n"
] | false |
99,572 |
a77c2a10c7966568e9c69c4997951fec2f91b06d
|
"""
It seems that prime number generation is a frequently recurring subtask when solving Project Euler problems...
I've been using the Sieve of Eratosthenes, which is relatively efficient at O(N * log(log N)), but I just read that the Sieve of Atkin
can perform faster, in O(N) time. So I'm going to take the algorithm from the Wikipedia page -
"https://en.wikipedia.org/wiki/Sieve_of_Atkin"
"""
import math
import time
import sys
import json
import os
import numpy as np
def sieve_atkin(limit):
P = [2,3]
    sieve = [False] * (limit + 1)
for x in range(1,int(math.sqrt(limit))+1):
for y in range(1,int(math.sqrt(limit))+1):
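            # flip the candidacy of n once per (x, y) solution of the three quadratic
            # forms below; n survives only if it is flipped an odd number of times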
            n = 4*x**2 + y**2
            if n <= limit and (n % 12 == 1 or n % 12 == 5): sieve[n] = not sieve[n]
            n = 3*x**2 + y**2
            if n <= limit and n % 12 == 7: sieve[n] = not sieve[n]
            n = 3*x**2 - y**2
            if x > y and n <= limit and n % 12 == 11: sieve[n] = not sieve[n]
    for x in range(5, int(math.sqrt(limit)) + 1):  # must reach sqrt(limit) itself, or squares like 25 survive for limit=30
if sieve[x]:
for y in range(x**2,limit+1,x**2):
sieve[y] = False
    for p in range(5, limit + 1):  # include the limit itself in case it is prime
        if sieve[p]: P.append(p)
return P
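
# Illustrative sanity check, worked through by hand from the loops above:
#   sieve_atkin(20) -> [2, 3, 5, 7, 11, 13, 17, 19]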
def sieve_atkin_nump(limit):
pass
if __name__ == '__main__':
startTime = time.perf_counter()
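    # CLI: first argument is the sieve limit; optional second argument is the output directory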
limit = int(sys.argv[1])
if len(sys.argv) == 3:
target_dir = sys.argv[2]
else:
target_dir = '.'
primes = np.array(sieve_atkin(limit))
filename = 'primes' + '1e' + str(len(str(limit))) + '.npy'
with open(os.path.join(target_dir,filename),'wb') as f:
np.save(f,primes)
endTime = time.perf_counter()
print("Time elapsed:", '{:0.6f}'.format(endTime - startTime), "seconds.")
|
[
"\"\"\"\nIt seems that prime number generation is a frequently reoccurring subtask when solving Project Euler problems...\nI've been using the Sieve of Erastosthenes, which is relatively efficient O(N * log (log N)), but I just read that the Sieve of Atkin\ncan perform faster, in O(N) time. So I'm going to take the algorithm from the wikipedia page -\n\n \"https://en.wikipedia.org/wiki/Sieve_of_Atkin\"\n\"\"\"\n\n\n\nimport math\nimport time\nimport sys\nimport json\nimport os\nimport numpy as np\n\ndef sieve_atkin(limit):\n P = [2,3]\n sieve=[False]*(limit+1)\n for x in range(1,int(math.sqrt(limit))+1):\n for y in range(1,int(math.sqrt(limit))+1):\n n = 4*x**2 + y**2\n if n<=limit and (n%12==1 or n%12==5) : sieve[n] = not sieve[n]\n n = 3*x**2+y**2\n if n<= limit and n%12==7 : sieve[n] = not sieve[n]\n n = 3*x**2 - y**2\n if x>y and n<=limit and n%12==11 : sieve[n] = not sieve[n]\n for x in range(5,int(math.sqrt(limit))):\n if sieve[x]:\n for y in range(x**2,limit+1,x**2):\n sieve[y] = False\n for p in range(5,limit):\n if sieve[p] : P.append(p)\n return P\n\ndef sieve_atkin_nump(limit):\n pass \n\n\nif __name__ == '__main__':\n startTime = time.perf_counter()\n \n limit = int(sys.argv[1]) \n\n if len(sys.argv) == 3:\n target_dir = sys.argv[2]\n else:\n target_dir = '.'\n\n\n primes = np.array(sieve_atkin(limit))\n filename = 'primes' + '1e' + str(len(str(limit))) + '.npy'\n\n with open(os.path.join(target_dir,filename),'wb') as f:\n np.save(f,primes)\n endTime = time.perf_counter()\n print(\"Time elapsed:\", '{:0.6f}'.format(endTime - startTime), \"seconds.\")\n\n\n\n\n",
"<docstring token>\nimport math\nimport time\nimport sys\nimport json\nimport os\nimport numpy as np\n\n\ndef sieve_atkin(limit):\n P = [2, 3]\n sieve = [False] * (limit + 1)\n for x in range(1, int(math.sqrt(limit)) + 1):\n for y in range(1, int(math.sqrt(limit)) + 1):\n n = 4 * x ** 2 + y ** 2\n if n <= limit and (n % 12 == 1 or n % 12 == 5):\n sieve[n] = not sieve[n]\n n = 3 * x ** 2 + y ** 2\n if n <= limit and n % 12 == 7:\n sieve[n] = not sieve[n]\n n = 3 * x ** 2 - y ** 2\n if x > y and n <= limit and n % 12 == 11:\n sieve[n] = not sieve[n]\n for x in range(5, int(math.sqrt(limit))):\n if sieve[x]:\n for y in range(x ** 2, limit + 1, x ** 2):\n sieve[y] = False\n for p in range(5, limit):\n if sieve[p]:\n P.append(p)\n return P\n\n\ndef sieve_atkin_nump(limit):\n pass\n\n\nif __name__ == '__main__':\n startTime = time.perf_counter()\n limit = int(sys.argv[1])\n if len(sys.argv) == 3:\n target_dir = sys.argv[2]\n else:\n target_dir = '.'\n primes = np.array(sieve_atkin(limit))\n filename = 'primes' + '1e' + str(len(str(limit))) + '.npy'\n with open(os.path.join(target_dir, filename), 'wb') as f:\n np.save(f, primes)\n endTime = time.perf_counter()\n print('Time elapsed:', '{:0.6f}'.format(endTime - startTime), 'seconds.')\n",
"<docstring token>\n<import token>\n\n\ndef sieve_atkin(limit):\n P = [2, 3]\n sieve = [False] * (limit + 1)\n for x in range(1, int(math.sqrt(limit)) + 1):\n for y in range(1, int(math.sqrt(limit)) + 1):\n n = 4 * x ** 2 + y ** 2\n if n <= limit and (n % 12 == 1 or n % 12 == 5):\n sieve[n] = not sieve[n]\n n = 3 * x ** 2 + y ** 2\n if n <= limit and n % 12 == 7:\n sieve[n] = not sieve[n]\n n = 3 * x ** 2 - y ** 2\n if x > y and n <= limit and n % 12 == 11:\n sieve[n] = not sieve[n]\n for x in range(5, int(math.sqrt(limit))):\n if sieve[x]:\n for y in range(x ** 2, limit + 1, x ** 2):\n sieve[y] = False\n for p in range(5, limit):\n if sieve[p]:\n P.append(p)\n return P\n\n\ndef sieve_atkin_nump(limit):\n pass\n\n\nif __name__ == '__main__':\n startTime = time.perf_counter()\n limit = int(sys.argv[1])\n if len(sys.argv) == 3:\n target_dir = sys.argv[2]\n else:\n target_dir = '.'\n primes = np.array(sieve_atkin(limit))\n filename = 'primes' + '1e' + str(len(str(limit))) + '.npy'\n with open(os.path.join(target_dir, filename), 'wb') as f:\n np.save(f, primes)\n endTime = time.perf_counter()\n print('Time elapsed:', '{:0.6f}'.format(endTime - startTime), 'seconds.')\n",
"<docstring token>\n<import token>\n\n\ndef sieve_atkin(limit):\n P = [2, 3]\n sieve = [False] * (limit + 1)\n for x in range(1, int(math.sqrt(limit)) + 1):\n for y in range(1, int(math.sqrt(limit)) + 1):\n n = 4 * x ** 2 + y ** 2\n if n <= limit and (n % 12 == 1 or n % 12 == 5):\n sieve[n] = not sieve[n]\n n = 3 * x ** 2 + y ** 2\n if n <= limit and n % 12 == 7:\n sieve[n] = not sieve[n]\n n = 3 * x ** 2 - y ** 2\n if x > y and n <= limit and n % 12 == 11:\n sieve[n] = not sieve[n]\n for x in range(5, int(math.sqrt(limit))):\n if sieve[x]:\n for y in range(x ** 2, limit + 1, x ** 2):\n sieve[y] = False\n for p in range(5, limit):\n if sieve[p]:\n P.append(p)\n return P\n\n\ndef sieve_atkin_nump(limit):\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef sieve_atkin(limit):\n P = [2, 3]\n sieve = [False] * (limit + 1)\n for x in range(1, int(math.sqrt(limit)) + 1):\n for y in range(1, int(math.sqrt(limit)) + 1):\n n = 4 * x ** 2 + y ** 2\n if n <= limit and (n % 12 == 1 or n % 12 == 5):\n sieve[n] = not sieve[n]\n n = 3 * x ** 2 + y ** 2\n if n <= limit and n % 12 == 7:\n sieve[n] = not sieve[n]\n n = 3 * x ** 2 - y ** 2\n if x > y and n <= limit and n % 12 == 11:\n sieve[n] = not sieve[n]\n for x in range(5, int(math.sqrt(limit))):\n if sieve[x]:\n for y in range(x ** 2, limit + 1, x ** 2):\n sieve[y] = False\n for p in range(5, limit):\n if sieve[p]:\n P.append(p)\n return P\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,573 |
55fbae4517d8f8e6d2f9f426a56aaf18c9620a4b
|
#!/usr/bin/env python2
# written by Moses Arocha
# Created in Python, with the help of TJ O'Connor's book "Violent Python".
from scapy.all import *
import sys
import os
interface = 'mon0' # Uses the wireless NIC called mon0; the network card must be put in monitor mode under that name
HiddenNetworks = []
ShownNetworks = []
def SniffNetwork(p):
    if p.haslayer(Dot11ProbeResp):
        MACAddr = p.getlayer(Dot11).addr2 # Grabs the MAC address of the responding access point
        if (MACAddr in HiddenNetworks) and (MACAddr not in ShownNetworks): # Only decloak networks we flagged as hidden
            netName = p.getlayer(Dot11ProbeResp).info
            print '\t[Success] Decloaked Hidden SSID ' + netName + ' for MAC: ' + MACAddr
            ShownNetworks.append(MACAddr)
    if p.haslayer(Dot11Beacon): # Detects the hidden networks' beacon frames
        if p.getlayer(Dot11Beacon).info == '': # An empty SSID marks a hidden network
            MACAddr = p.getlayer(Dot11).addr2 # addr2 carries the transmitter (AP) MAC address
            if MACAddr not in HiddenNetworks:
                print '\t[Attempt] Detected Hidden SSID with MAC: ' + MACAddr
                HiddenNetworks.append(MACAddr)
if not os.geteuid() == 0:
    sys.exit('\t Please Run As Root!!') # Raw packet capture needs root privileges, so bail out otherwise
os.system('airmon-ng start wlan0') # Shells out to airmon-ng to put the wireless NIC into monitor mode (creating mon0)
print " \t The Sniffing Has Begun... Please Wait... \n"
sniff(iface=interface, prn=SniffNetwork) # sniff() blocks, so it must come last, after monitor mode is enabled
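
# Illustrative invocation (script name assumed), run from a terminal on a machine with a wireless NIC:
#   sudo python2 hidden_ssid_sniffer.py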
|
[
"#!/usr/bin/env python2\n# written by Moses Arocha\n# Created in Python, with the help of TJ O'Connor's book \"Violent Python\".\n\n\nfrom scapy.all import *\n\nimport sys\nimport os\n\ninterface = 'mon0'\t\t\t\t# Uses the wireless NIC called mon0, must put Network Card in Monitor mode with name of mon0\nHiddenNetworks = []\nShownNetworks = []\n\ndef SniffNetwork(p):\n if p.haslayer(Dot11ProbeResp):\n MACAddr = p.getlayer(Dot11).addr2\t\t\t\t\t# Grabs the MAC Address of the wireless connection\n\tif (MACAddr in HiddenNetworks) & (MACAddr not in ShownNetworks):\t# Checks to see if MAC Address is in wireless \n\t netName = p.getlayer(Dot11ProbeResp).info\n\t print '\\t[Success] Decloacked Hidden SSID ' + netName + ' for MAC: ' + MACAddr\t\n\t ShownNetworks.append(MACAddr)\nif p.haslayer(Dot11Beacon): \t\t\t\t# Function that detects the hidden networks Beacon signals\n if p.getlayer(Dot11Beacon).info == '':\n MACAddr = p.getlayer(Dot11).MACAddr\n\tif MACAddr not in HiddenNetworks:\n\t print '\\t[Attempt] Detected Hidden SSID with MAC: ' + MACAddr\t\n\t HiddenNetworks.append(MACAddr)\n\nif not os.geteuid() == 0:\n sys.exit('\\t Please Run As Root!!')\t\t# Checks to see if the user is root, this code can only be run in root\nos.system('airmon-ng start wlan0')\t\t# Interacts with terminal to put the wireless NIC in monitor mode.\nprint \" \\t The Sniffing Has Begun... Please Wait... \\n\"\nsniff(iface=interface, prn=SniffNetwork)\t# Must be placed last so monitor mode can be enabled.\n\n\n\n"
] | true |
99,574 |
f65b9045447d228ee789273d06cefc89ccc50e7a
|
import pandas as pd
import numpy as np
import pickle
'''
Parse 1976-2016 house data from https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/IG0UN2
@relFilePath : file path of house results data, relative to the Python notebook
@return: winner and runner-up dataframes keyed by state, district, and year
'''
def load_data(relFilePath, minYear=2010):
''' Keep only the winner and 2nd place candidates within each state's district for every year.
arguments:
relFilePath -- path to the data file (csv)
minYear -- only records for all years from and after the min year will be kept (int)
returns:
dataframe with only the winners (pandas.dataframe)
dataframe with only the 2nd place candidates (pandas.dataframe)
'''
data_df = pd.read_csv(relFilePath)
winners_df = pd.DataFrame()
winners2_df = pd.DataFrame()
for key, shard in data_df.groupby(['year', 'state_po', 'district']):
if int(key[0]) >= minYear:
            #convention: 2nd place = 1st place if there is only one candidate
winners_df = winners_df.append(shard.loc[shard['candidatevotes'].idxmax()])
sortedIndices = (shard['candidatevotes'].values.argsort()[::-1])
if len(sortedIndices) > 1:
winners2_df = winners2_df.append(shard.iloc[sortedIndices[1]])
else:
winners2_df = winners2_df.append(shard.iloc[sortedIndices[0]])
return winners_df, winners2_df
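
# Example usage (illustrative file path):
#   winners_df, winners2_df = load_data('Datasets/1976-2016-house.csv', minYear=2010)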
def clean_index(df, clean_before_build=True):
'''Performs general clean up tasks on the key columns. Generates the master key.
arguments:
        df -- dataframe to clean up, should contain the columns 'district', 'state_po' and 'year' (pandas.dataframe)
        clean_before_build -- when True, drop/rename/retype the key columns before the index is built (bool)
returns:
dataframe with cleaned key columns and index (pandas.dataframe)
'''
if clean_before_build:
# drop default index
df = df.reset_index().drop(['index','state'], axis=1)
# rename state code
df = df.rename(columns={'state_po' : 'state'})
#format year and district columns as ints
df = df.astype({'year': int, 'district': int})
#make sure all districts start with 1
df.loc[df['district']==0, 'district'] = 1
# glue together the columns to get a more descriptive index
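    # e.g. a 2014 race in Texas's 3rd district becomes 'TX_03_2014'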
df.index = ['{0}_{1:02d}_{2}'.format(row['state'],row['district'],row['year']) for _,row in df.iterrows()]
return df
def fetch_index(df, df2, save=False, load=False):
'''Helper function for generating/loading master index for syncing between data sources.
arguments:
        df -- dataframe to parse index from, MUST CONTAIN FULL COPIES OF THE 'district', 'state', 'year' COLUMNS (pandas.dataframe)
        df2 -- second dataframe whose key columns are appended to the master index (pandas.dataframe)
        save -- if True, pickle the master index to Datasets/master_index.p (bool)
        load -- if True, load a previously pickled master index instead of building one (bool)
returns:
dataframe with master index for syncing between data sources.
'''
if not load:
        # Keep just the key columns; every other data source syncs against this index
tmp1 = df[['district', 'state', 'year']]
tmp2 = df2[['district', 'state', 'year']]
master_index = pd.concat([tmp1, tmp2])
if save:
pickle.dump(master_index, open('Datasets/master_index.p', 'wb'))
return master_index
else:
master_index = pickle.load(open('Datasets/master_index.p', 'rb'))
return master_index
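
# Example (based on the signature above): build and persist the index once, then reload it later:
#   master_index = fetch_index(winners_df, winners2_df, save=True)
#   master_index = fetch_index(None, None, load=True)   # df arguments are ignored when load=True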
def fetch_trimmed_data(df1, df2, minYear=2012):
'''Compile training data. Additional cleaning and processing to generate additional features.
arguments:
df1 -- dataframe to compile training data from, should be loaded through load_data() and cleaned with clean_index()
df2 -- dataframe with 2nd place candidates for each race
minYear -- only records for all years from and after the min year will be kept (int)
returns:
dataframe containing training data.
'''
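    # keep only the key columns plus the vote counts and candidate names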
df1 = df1[['district', 'state', 'year', 'party', 'candidatevotes', 'totalvotes', 'candidate']]
df2 = df2[['district', 'state', 'year', 'party', 'candidatevotes', 'totalvotes', 'candidate']]
########################################## ADDITIONAL CLEANING RELATED TO PARTY ##########################################
#democratic-farmer-labor -> democratic party (one entry in 2012)
df1.loc[df1['party'] == 'democratic-farmer-labor', 'party'] = 'democrat'
#tax revolt -> republican party (one entry in 2012)
df1.loc[df1['party'] == 'tax revolt', 'party'] = 'republican'
#no clear indication which was to cast it, go by candidates closest affiliation between democrat/republican
#independent -> democrat (one entry in 2004 -- bernard sanders)
df1.loc[df1['party'] == 'independent', 'party'] = 'democrat'
#reform -> republican (two entires in 2002, 2004 -- henry e. brown jr., 2002 -- barbara dale washer)
df1.loc[df1['party'] == 'reform', 'party'] = 'republican'
#republican/democrat -> republican (one entry in 2002) -- Don Sherwood
df1.loc[df1['party'] == 'republican/democrat', 'party'] = 'republican'
#KS 1.0: republican (tea party) -- might be nan because he ran under republican party ticket but he's actually from tea party?
#KS 2.0: republican (tea party)
#KS 3.0: republican (?)
#KS 4.0: republican (tea party)
#LA 1.0: republican (it's complicated)
#LA 2.0: democrat (?)
#LA 3.0: republican
#if there is a run off election, we don't include it in the data. so vote counts could be iffy (e.g. see issues with verifying LA 3.0 vote counts)
#winner may be in correct then if the votes are not from the run-off election, like they should be! in this case..
#LA 4.0: republican (tea party? maybe...)
#LA 5.0: republican (tea party caucus)
#LA 6.0: republican (tea party)
#MS 1.0: republican
#MS 2.0: democrat (??)
#MS 3.0: republican
#MS 4.0: republican
#ND 0.0: republican
#WY 0.0: republican
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'KS') & (df1['district'] == 1.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'KS') & (df1['district'] == 2.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'KS') & (df1['district'] == 3.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'KS') & (df1['district'] == 4.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'LA') & (df1['district'] == 1.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'LA') & (df1['district'] == 2.0), 'party'] = 'democrat'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'LA') & (df1['district'] == 3.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'LA') & (df1['district'] == 4.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'LA') & (df1['district'] == 5.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'LA') & (df1['district'] == 6.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'MS') & (df1['district'] == 1.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'MS') & (df1['district'] == 2.0), 'party'] = 'democrat'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'MS') & (df1['district'] == 3.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'MS') & (df1['district'] == 4.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'ND') & (df1['district'] == 1.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'WY') & (df1['district'] == 1.0), 'party'] = 'republican'
df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'CO') & (df1['district'] == 6.0), 'party'] = 'republican'
    #democratic-farmer-labor -> democratic party (one entry in 2012)
    df2.loc[df2['party'] == 'democratic-farmer-labor', 'party'] = 'democrat'
    #tax revolt -> republican party (one entry in 2012)
    df2.loc[df2['party'] == 'tax revolt', 'party'] = 'republican'
    #no clear indication which way to cast these, so go by the candidate's closest affiliation between democrat/republican
    #independent -> democrat (one entry in 2004 -- bernard sanders)
    df2.loc[df2['party'] == 'independent', 'party'] = 'democrat'
    #reform -> republican (two entries in 2002, 2004 -- henry e. brown jr., 2002 -- barbara dale washer)
    df2.loc[df2['party'] == 'reform', 'party'] = 'republican'
    #republican/democrat -> republican (one entry in 2002) -- Don Sherwood
    df2.loc[df2['party'] == 'republican/democrat', 'party'] = 'republican'
    #KS 1.0: republican (tea party) -- might be NaN because he ran on the republican ticket but is actually from the tea party?
    #KS 2.0: republican (tea party)
    #KS 3.0: republican (?)
    #KS 4.0: republican (tea party)
    #LA 1.0: republican (it's complicated)
    #LA 2.0: democrat (?)
    #LA 3.0: republican
    #if there is a run-off election, we don't include it in the data, so vote counts could be iffy (e.g. see issues with verifying LA 3.0 vote counts)
    #the winner may then be incorrect if the votes are not from the run-off election, like they should be, in this case..
    #LA 4.0: republican (tea party? maybe...)
    #LA 5.0: republican (tea party caucus)
    #LA 6.0: republican (tea party)
    #MS 1.0: republican
    #MS 2.0: democrat (??)
    #MS 3.0: republican
    #MS 4.0: republican
    #ND 0.0: republican (at-large seat; stored as district 1 after clean_index renumbers district 0 -> 1)
    #WY 0.0: republican (at-large seat; stored as district 1 after clean_index renumbers district 0 -> 1)
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'KS') & (df2['district'] == 1.0), 'party'] = 'republican'
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'KS') & (df2['district'] == 2.0), 'party'] = 'republican'
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'KS') & (df2['district'] == 3.0), 'party'] = 'republican'
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'KS') & (df2['district'] == 4.0), 'party'] = 'republican'

    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'LA') & (df2['district'] == 1.0), 'party'] = 'republican'
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'LA') & (df2['district'] == 2.0), 'party'] = 'democrat'
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'LA') & (df2['district'] == 3.0), 'party'] = 'republican'
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'LA') & (df2['district'] == 4.0), 'party'] = 'republican'
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'LA') & (df2['district'] == 5.0), 'party'] = 'republican'
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'LA') & (df2['district'] == 6.0), 'party'] = 'republican'

    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'MS') & (df2['district'] == 1.0), 'party'] = 'republican'
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'MS') & (df2['district'] == 2.0), 'party'] = 'democrat'
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'MS') & (df2['district'] == 3.0), 'party'] = 'republican'
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'MS') & (df2['district'] == 4.0), 'party'] = 'republican'

    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'ND') & (df2['district'] == 1.0), 'party'] = 'republican'
    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'WY') & (df2['district'] == 1.0), 'party'] = 'republican'

    df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'CO') & (df2['district'] == 6.0), 'party'] = 'republican'
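    #Note (added): df1 and df2 receive identical party fixes above; a shared helper, e.g. a
    #hypothetical _fix_parties(df) applied to both frames, would keep the two blocks in sync.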
    ########################################## ADDITIONAL PROCESSING W. ASSUMPTIONS ##########################################

    poll = pickle.load(open('Datasets/national_poll.p', 'rb'))
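    #the pickle is assumed (inferred from the lookups below, not stated in the original) to hold a
    #frame indexed by the same STATE_DD_YYYY keys as df1, with a 'national_poll' column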
    for year in range(minYear, int(max(df1['year'].values))+1, 2):
        #convention: t -> current election, t-2 (tm2) -> previous election
        for index_t, row in df1.iterrows():
            if row['year'] == year:
                index_tm2 = index_t.replace(str(year), str(year-2))
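                #e.g. index_t 'TX_05_2014' -> index_tm2 'TX_05_2012' (illustrative key, added for clarity);
                #str.replace swaps every occurrence of the year, which the STATE_DD_YYYY format (2-digit
                #district) keeps unambiguous in practice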
                if index_tm2 in df1.index:
                    #a district is dropped if it does not exist in all years being processed (implicitly assuming districts keep the same shape across all years)

                    #################### POLLING FEATURES ####################
                    poll_t = poll.loc[poll.index == index_t, 'national_poll'].values[0]
                    poll_tm2 = poll.loc[poll.index == index_tm2, 'national_poll'].values[0]
                    df1.loc[df1.index == index_t, 'national_poll'] = poll_t
                    df1.loc[df1.index == index_t, 'national_poll_prev'] = poll_tm2
                    df1.loc[df1.index == index_t, 'national_poll_delta_subtract'] = poll_t - poll_tm2
                    df1.loc[df1.index == index_t, 'national_poll_delta_divide'] = poll_t/poll_tm2
                    #################### POLLING FEATURES ####################
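                    #e.g. poll_t = 45.2, poll_tm2 = 48.6 -> delta_subtract = -3.4, delta_divide ~= 0.93 (illustrative values, added)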
                    #################### PREVIOUS WINNERS ####################
                    df1.loc[df1.index == index_t, 'previous_party'] = df1.loc[df1.index == index_tm2, 'party'].values[0]
                    #################### PREVIOUS WINNERS ####################
                    #################### MARGIN FEATURES ####################
                    #convention: when signed, always defined as dem +ve and rep -ve
                    winner_totalvotes = df1.loc[df1.index == index_tm2, 'totalvotes'].values[0]
                    loser_totalvotes = df2.loc[df2.index == index_tm2, 'totalvotes'].values[0]
                    if winner_totalvotes == 0:
                        winner_margin = 1
                    else:
                        winner_margin = (df1.loc[df1.index == index_tm2, 'candidatevotes'].values[0])/(winner_totalvotes)
                    if loser_totalvotes == 0:
                        loser_margin = 1
                    else:
                        loser_margin = (df2.loc[df2.index == index_tm2, 'candidatevotes'].values[0])/(loser_totalvotes)
                    if winner_margin == loser_margin:
                        #only 1 player: give the absent runner-up a tiny margin so the ratio features stay finite
                        loser_margin = 1e-10
                    ### see convention for 2nd winner when only 1 player ###
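                    #e.g. winner with 152,000 of 250,000 votes -> winner_margin = 0.608; runner-up with
                    #98,000 of 250,000 -> loser_margin = 0.392 (illustrative numbers, added)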
                    label_dem = 'dem_win_margin_prev'
                    label_rep = 'rep_win_margin_prev'
                    label_sm = 'margin_signed_minus_prev'
                    label_um = 'margin_unsigned_minus_prev'
                    label_sd = 'margin_signed_divide_prev'
                    label_ud = 'margin_unsigned_divide_prev'
                    #if previous winner was democrat
                    if df1.loc[df1.index == index_tm2, 'party'].values[0] == 'democrat':

                        df1.loc[df1.index == index_t, label_dem] = winner_margin
                        df1.loc[df1.index == index_t, label_rep] = loser_margin

                        df1.loc[df1.index == index_t, label_sm] = winner_margin - loser_margin
                        if loser_margin != 0:
                            df1.loc[df1.index == index_t, label_sd] = winner_margin/loser_margin
                        else:
                            df1.loc[df1.index == index_t, label_sd] = winner_margin/(1e-10)

                    elif df1.loc[df1.index == index_tm2, 'party'].values[0] == 'republican':

                        df1.loc[df1.index == index_t, label_dem] = loser_margin
                        df1.loc[df1.index == index_t, label_rep] = winner_margin

                        df1.loc[df1.index == index_t, label_sm] = loser_margin - winner_margin
                        if winner_margin != 0:
                            df1.loc[df1.index == index_t, label_sd] = loser_margin/winner_margin
                        else:
                            df1.loc[df1.index == index_t, label_sd] = loser_margin/(1e-10)

                    df1.loc[df1.index == index_t, label_um] = winner_margin - loser_margin
                    if loser_margin != 0:
                        df1.loc[df1.index == index_t, label_ud] = winner_margin/loser_margin
                    else:
                        df1.loc[df1.index == index_t, label_ud] = winner_margin/(1e-10)

                    #################### MARGIN FEATURES ####################

                    # to-do features
                    # incumbent? name check
                    # summary statistics for winning margins changing over time
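                    #A minimal sketch for the incumbency to-do (added assumption, untested -- a plain
                    #name match between this election's winner and the previous one):
                    #    incumbent = (df1.loc[df1.index == index_t, 'candidate'].values[0]
                    #                 == df1.loc[df1.index == index_tm2, 'candidate'].values[0])
                    #    df1.loc[df1.index == index_t, 'incumbent'] = int(incumbent)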
                else:
                    df1 = df1[df1.index != index_t]
    #drop the minYear-2 rows, which were kept only so the features above could look one election back
    df1 = df1[df1['year'] != minYear - 2]

    #################### PREVIOUS WINNER FEATURES ####################
    df1.loc[df1['previous_party'] == 'democrat', 'dem_win_prev'] = 1
    df1.loc[df1['previous_party'] != 'democrat', 'dem_win_prev'] = 0
    df1.loc[df1['previous_party'] == 'republican', 'rep_win_prev'] = 1
    df1.loc[df1['previous_party'] != 'republican', 'rep_win_prev'] = 0
    #################### PREVIOUS WINNER FEATURES ####################
    #################### OBSERVED WINNER ####################
    df1.loc[df1['party'] == 'democrat', 'dem_win'] = 1
    df1.loc[df1['party'] != 'democrat', 'dem_win'] = 0
    df1.loc[df1['party'] == 'republican', 'rep_win'] = 1
    df1.loc[df1['party'] != 'republican', 'rep_win'] = 0
    #################### OBSERVED WINNER ####################
    return df1
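
#A minimal usage sketch (added; the csv path is illustrative, not from the original):
#    winners, runners_up = load_data('Datasets/1976-2016-house.csv', minYear=2010)
#    winners, runners_up = clean_index(winners), clean_index(runners_up)
#    train_df = fetch_trimmed_data(winners, runners_up, minYear=2012)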
|
[
"import pandas as pd\nimport numpy as np\nimport pickle\n'''\nParse 1976-2016 house data from https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/IG0UN2\n\n@relFilePath : file path of house results data, relative to python notebook\n\n@return: dataframe indexed by (year, state, district)\n'''\n\ndef load_data(relFilePath, minYear=2010):\n ''' Keep only the winner and 2nd place candidates within each state's district for every year.\n arguments:\n relFilePath -- path to the data file (csv)\n minYear -- only records for all years from and after the min year will be kept (int)\n returns:\n dataframe with only the winners (pandas.dataframe)\n dataframe with only the 2nd place candidates (pandas.dataframe)\n '''\n\n data_df = pd.read_csv(relFilePath)\n\n winners_df = pd.DataFrame()\n winners2_df = pd.DataFrame()\n for key, shard in data_df.groupby(['year', 'state_po', 'district']): \n if int(key[0]) >= minYear:\n #convention: 2nd winner = 1st winner if only 1 player\n winners_df = winners_df.append(shard.loc[shard['candidatevotes'].idxmax()])\n sortedIndices = (shard['candidatevotes'].values.argsort()[::-1])\n if len(sortedIndices) > 1:\n winners2_df = winners2_df.append(shard.iloc[sortedIndices[1]])\n else:\n winners2_df = winners2_df.append(shard.iloc[sortedIndices[0]])\n return winners_df, winners2_df\n\ndef clean_index(df, clean_before_build=True):\n '''Performs general clean up tasks on the key columns. Generates the master key.\n arguments:\n df -- dataframe to clean up, should contain the columns 'district', 'state_po' and 'year' (pandas.dataframe)\n returns:\n dataframe with cleaned key columns and index (pandas.dataframe)\n '''\n if clean_before_build:\n # drop default index\n df = df.reset_index().drop(['index','state'], axis=1)\n # rename state code\n df = df.rename(columns={'state_po' : 'state'})\n #format year and district columns as ints\n df = df.astype({'year': int, 'district': int})\n #make sure all districts start with 1\n df.loc[df['district']==0, 'district'] = 1\n\n # glue together the columns to get a more descriptive index \n df.index = ['{0}_{1:02d}_{2}'.format(row['state'],row['district'],row['year']) for _,row in df.iterrows()]\n\n return df\n\ndef fetch_index(df, df2, save=False, load=False):\n '''Helper function for generating/loading master index for syncing between data sources.\n arguments:\n df -- dataframe to parse index from, MUST CONTAIN FULL COPIES OF THE 'district', 'state_po', 'year' COLUMNS (pandas.dataframe)\n returns:\n dataframe with master index for syncing between data sources.\n '''\n\n if not load:\n # Make a dummy dataframe so everyone else can make complete dataframes\n tmp1 = df[['district', 'state', 'year']]\n tmp2 = df2[['district', 'state', 'year']]\n master_index = pd.concat([tmp1, tmp2])\n\n if save:\n pickle.dump(master_index, open('Datasets/master_index.p', 'wb'))\n return master_index\n\n else:\n master_index = pickle.load(open('Datasets/master_index.p', 'rb'))\n return master_index \n\ndef fetch_trimmed_data(df1, df2, minYear=2012):\n '''Compile training data. 
Additional cleaning and processing to generate additional features.\n arguments:\n df1 -- dataframe to compile training data from, should be loaded through load_data() and cleaned with clean_index()\n df2 -- dataframe with 2nd place candidates for each race\n minYear -- only records for all years from and after the min year will be kept (int)\n returns:\n dataframe containing training data.\n '''\n\n df1 = df1[['district', 'state', 'year', 'party', 'candidatevotes', 'totalvotes', 'candidate']]\n df2 = df2[['district', 'state', 'year', 'party', 'candidatevotes', 'totalvotes', 'candidate']]\n\n ########################################## ADDITIONAL CLEANING RELATED TO PARTY ##########################################\n\n #democratic-farmer-labor -> democratic party (one entry in 2012)\n df1.loc[df1['party'] == 'democratic-farmer-labor', 'party'] = 'democrat'\n #tax revolt -> republican party (one entry in 2012)\n df1.loc[df1['party'] == 'tax revolt', 'party'] = 'republican'\n\n #no clear indication which was to cast it, go by candidates closest affiliation between democrat/republican \n #independent -> democrat (one entry in 2004 -- bernard sanders)\n df1.loc[df1['party'] == 'independent', 'party'] = 'democrat' \n #reform -> republican (two entires in 2002, 2004 -- henry e. brown jr., 2002 -- barbara dale washer)\n df1.loc[df1['party'] == 'reform', 'party'] = 'republican'\n #republican/democrat -> republican (one entry in 2002) -- Don Sherwood\n df1.loc[df1['party'] == 'republican/democrat', 'party'] = 'republican'\n \n #KS 1.0: republican (tea party) -- might be nan because he ran under republican party ticket but he's actually from tea party?\n #KS 2.0: republican (tea party)\n #KS 3.0: republican (?)\n #KS 4.0: republican (tea party)\n #LA 1.0: republican (it's complicated)\n #LA 2.0: democrat (?)\n #LA 3.0: republican\n #if there is a run off election, we don't include it in the data. so vote counts could be iffy (e.g. see issues with verifying LA 3.0 vote counts)\n #winner may be in correct then if the votes are not from the run-off election, like they should be! in this case..\n #LA 4.0: republican (tea party? 
maybe...)\n #LA 5.0: republican (tea party caucus)\n #LA 6.0: republican (tea party)\n #MS 1.0: republican\n #MS 2.0: democrat (??)\n #MS 3.0: republican\n #MS 4.0: republican\n #ND 0.0: republican\n #WY 0.0: republican\n\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'KS') & (df1['district'] == 1.0), 'party'] = 'republican'\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'KS') & (df1['district'] == 2.0), 'party'] = 'republican'\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'KS') & (df1['district'] == 3.0), 'party'] = 'republican'\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'KS') & (df1['district'] == 4.0), 'party'] = 'republican'\n \n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'LA') & (df1['district'] == 1.0), 'party'] = 'republican'\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'LA') & (df1['district'] == 2.0), 'party'] = 'democrat'\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'LA') & (df1['district'] == 3.0), 'party'] = 'republican'\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'LA') & (df1['district'] == 4.0), 'party'] = 'republican'\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'LA') & (df1['district'] == 5.0), 'party'] = 'republican'\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'LA') & (df1['district'] == 6.0), 'party'] = 'republican'\n \n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'MS') & (df1['district'] == 1.0), 'party'] = 'republican'\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'MS') & (df1['district'] == 2.0), 'party'] = 'democrat'\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'MS') & (df1['district'] == 3.0), 'party'] = 'republican'\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'MS') & (df1['district'] == 4.0), 'party'] = 'republican'\n \n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'ND') & (df1['district'] == 1.0), 'party'] = 'republican'\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'WY') & (df1['district'] == 1.0), 'party'] = 'republican'\n\n df1.loc[(pd.isnull(df1['party'])) & (df1['state'] == 'CO') & (df1['district'] == 6.0), 'party'] = 'republican'\n\n #democratic-farmer-labor -> democratic party (one entry in 2012)\n df2.loc[df2['party'] == 'democratic-farmer-labor', 'party'] = 'democrat'\n #tax revolt -> republican party (one entry in 2012)\n df2.loc[df2['party'] == 'tax revolt', 'party'] = 'republican' \n \n #no clear indication which was to cast it, go by candidates closest affiliation between democrat/republican \n #independent -> democrat (one entry in 2004 -- bernard sanders)\n df2.loc[df2['party'] == 'independent', 'party'] = 'democrat' \n #reform -> republican (two entires in 2002, 2004 -- henry e. brown jr., 2002 -- barbara dale washer)\n df2.loc[df2['party'] == 'reform', 'party'] = 'republican'\n #republican/democrat -> republican (one entry in 2002) -- Don Sherwood\n df2.loc[df2['party'] == 'republican/democrat', 'party'] = 'republican'\n \n #KS 1.0: republican (tea party) -- might be nan because he ran under republican party ticket but he's actually from tea party?\n #KS 2.0: republican (tea party)\n #KS 3.0: republican (?)\n #KS 4.0: republican (tea party)\n #LA 1.0: republican (it's complicated)\n #LA 2.0: democrat (?)\n #LA 3.0: republican\n #if there is a run off election, we don't include it in the data. so vote counts could be iffy (e.g. see issues with verifying LA 3.0 vote counts)\n #winner may be in correct then if the votes are not from the run-off election, like they should be! 
in this case..\n #LA 4.0: republican (tea party? maybe...)\n #LA 5.0: republican (tea party caucus)\n #LA 6.0: republican (tea party)\n #MS 1.0: republican\n #MS 2.0: democrat (??)\n #MS 3.0: republican\n #MS 4.0: republican\n #ND 0.0: republican\n #WY 0.0: republican\n\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'KS') & (df2['district'] == 1.0), 'party'] = 'republican'\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'KS') & (df2['district'] == 2.0), 'party'] = 'republican'\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'KS') & (df2['district'] == 3.0), 'party'] = 'republican'\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'KS') & (df2['district'] == 4.0), 'party'] = 'republican'\n \n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'LA') & (df2['district'] == 1.0), 'party'] = 'republican'\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'LA') & (df2['district'] == 2.0), 'party'] = 'democrat'\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'LA') & (df2['district'] == 3.0), 'party'] = 'republican'\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'LA') & (df2['district'] == 4.0), 'party'] = 'republican'\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'LA') & (df2['district'] == 5.0), 'party'] = 'republican'\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'LA') & (df2['district'] == 6.0), 'party'] = 'republican'\n \n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'MS') & (df2['district'] == 1.0), 'party'] = 'republican'\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'MS') & (df2['district'] == 2.0), 'party'] = 'democrat'\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'MS') & (df2['district'] == 3.0), 'party'] = 'republican'\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'MS') & (df2['district'] == 4.0), 'party'] = 'republican'\n \n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'ND') & (df2['district'] == 1.0), 'party'] = 'republican'\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'WY') & (df2['district'] == 1.0), 'party'] = 'republican'\n\n df2.loc[(pd.isnull(df2['party'])) & (df2['state'] == 'CO') & (df2['district'] == 6.0), 'party'] = 'republican'\n\n \n ########################################## ADDITIONAL PROCESSING W. 
ASSUMPTIONS ##########################################\n \n\n poll = pickle.load(open('Datasets/national_poll.p', 'rb'))\n for year in range(minYear, int(max(df1['year'].values))+1, 2):\n #convention: t-> current election, t-2 (tm2) -> previous election\n for index_t, row in df1.iterrows():\n if row['year'] == year:\n index_tm2 = index_t.replace(str(year), str(year-2))\n if index_tm2 in df1.index:\n #a district is dropped if it does not exist in all years being processed (implictly assuming districts are the same shape across all years)\n\n #################### POLLING FEATURES ####################\n poll_t = poll.loc[poll.index == index_t, 'national_poll'].values[0]\n poll_tm2 = poll.loc[poll.index == index_tm2, 'national_poll'].values[0]\n df1.loc[df1.index == index_t, 'national_poll'] = poll_t\n df1.loc[df1.index == index_t, 'national_poll_prev'] = poll_tm2\n df1.loc[df1.index == index_t, 'national_poll_delta_subtract'] = poll_t - poll_tm2\n df1.loc[df1.index == index_t, 'national_poll_delta_divide'] = poll_t/poll_tm2\n #################### POLLING FEATURES ####################\n\n #################### PREVIOUS WINNERS ####################\n df1.loc[df1.index == index_t, 'previous_party'] = df1.loc[df1.index == index_tm2, 'party'].values[0]\n #################### PREVIOUS WINNERS ####################\n\n \n #################### MARGIN FEATURES ####################\n #convention: when signed, always defined as dem +ve and rep -ve\n winner_totalvotes = df1.loc[df1.index == index_tm2, 'totalvotes'].values[0]\n loser_totalvotes = df2.loc[df2.index == index_tm2, 'totalvotes'].values[0]\n if winner_totalvotes == 0:\n winner_margin = 1\n else:\n winner_margin = (df1.loc[df1.index == index_tm2, 'candidatevotes'].values[0])/(winner_totalvotes)\n if loser_totalvotes == 0:\n loser_margin = 1\n else:\n loser_margin = (df2.loc[df2.index == index_tm2, 'candidatevotes'].values[0])/(loser_totalvotes)\n\n if winner_margin == loser_margin:\n #only 1 player\n loser_margin = 1e-10\n else:\n loser_margin = (df2.loc[df2.index == index_tm2, 'candidatevotes'].values[0])/(df2.loc[df2.index == index_tm2, 'totalvotes'].values[0]) \n ### see convention for 2nd winner when only 1 player ###\n\n label_dem = 'dem_win_margin_prev'\n label_rep = 'rep_win_margin_prev'\n label_sm = 'margin_signed_minus_prev'\n label_um = 'margin_unsigned_minus_prev'\n label_sd = 'margin_signed_divide_prev'\n label_ud = 'margin_unsigned_divide_prev'\n\n if df1.loc[df1.index == index_tm2, 'party'].values[0] == 'democrat':\n\n df1.loc[df1.index == index_t, label_dem] = winner_margin\n df1.loc[df1.index == index_t, label_rep] = loser_margin\n\n df1.loc[df1.index == index_t, label_sm] = winner_margin - loser_margin\n if loser_margin != 0:\n df1.loc[df1.index == index_t, label_sd] = winner_margin/loser_margin\n else:\n df1.loc[df1.index == index_t, label_sd] = winner_margin/(1e-10)\n\n elif df1.loc[df1.index == index_tm2, 'party'].values[0] == 'republican':\n\n df1.loc[df1.index == index_t, label_dem] = loser_margin\n df1.loc[df1.index == index_t, label_rep] = winner_margin\n\n df1.loc[df1.index == index_t, label_sm] = loser_margin - winner_margin\n if winner_margin != 0:\n df1.loc[df1.index == index_t, label_sd] = loser_margin/winner_margin\n else:\n df1.loc[df1.index == index_t, label_sd] = loser_margin/(1e-10)\n\n df1.loc[df1.index == index_t, label_um] = winner_margin - loser_margin\n if loser_margin != 0:\n df1.loc[df1.index == index_t, label_ud] = winner_margin/loser_margin\n else:\n df1.loc[df1.index == index_t, label_ud] = 
winner_margin/(1e-10)\n \n #if previous winner was democrat\n\n #################### MARGIN FEATURES ####################\n\n # to-do features\n # incumbent? name check\n # summary statistics for winning margins changing over time\n\n else:\n df1 = df1[df1.index != index_t]\n\n #trim df1 down to only 1 election before minyear\n df1 = df1[df1['year'] != minYear - 2]\n\n #################### PREVIOUS WINNER FEATURES ####################\n df1.loc[df1['previous_party'] == 'democrat', 'dem_win_prev'] = 1\n df1.loc[df1['previous_party'] != 'democrat', 'dem_win_prev'] = 0\n\n df1.loc[df1['previous_party'] == 'republican', 'rep_win_prev'] = 1\n df1.loc[df1['previous_party'] != 'republican', 'rep_win_prev'] = 0\n #################### PREVIOUS WINNER FEATURES ####################\n\n\n #################### OBSERVED WINNER ####################\n df1.loc[df1['party'] == 'democrat', 'dem_win'] = 1\n df1.loc[df1['party'] != 'democrat', 'dem_win'] = 0\n\n df1.loc[df1['party'] == 'republican', 'rep_win'] = 1\n df1.loc[df1['party'] != 'republican', 'rep_win'] = 0\n #################### OBSERVED WINNER ####################\n return df1 ",
"import pandas as pd\nimport numpy as np\nimport pickle\n<docstring token>\n\n\ndef load_data(relFilePath, minYear=2010):\n \"\"\" Keep only the winner and 2nd place candidates within each state's district for every year.\n arguments:\n relFilePath -- path to the data file (csv)\n minYear -- only records for all years from and after the min year will be kept (int)\n returns:\n dataframe with only the winners (pandas.dataframe)\n dataframe with only the 2nd place candidates (pandas.dataframe)\n \"\"\"\n data_df = pd.read_csv(relFilePath)\n winners_df = pd.DataFrame()\n winners2_df = pd.DataFrame()\n for key, shard in data_df.groupby(['year', 'state_po', 'district']):\n if int(key[0]) >= minYear:\n winners_df = winners_df.append(shard.loc[shard['candidatevotes'\n ].idxmax()])\n sortedIndices = shard['candidatevotes'].values.argsort()[::-1]\n if len(sortedIndices) > 1:\n winners2_df = winners2_df.append(shard.iloc[sortedIndices[1]])\n else:\n winners2_df = winners2_df.append(shard.iloc[sortedIndices[0]])\n return winners_df, winners2_df\n\n\ndef clean_index(df, clean_before_build=True):\n \"\"\"Performs general clean up tasks on the key columns. Generates the master key.\n arguments:\n df -- dataframe to clean up, should contain the columns 'district', 'state_po' and 'year' (pandas.dataframe)\n returns:\n dataframe with cleaned key columns and index (pandas.dataframe)\n \"\"\"\n if clean_before_build:\n df = df.reset_index().drop(['index', 'state'], axis=1)\n df = df.rename(columns={'state_po': 'state'})\n df = df.astype({'year': int, 'district': int})\n df.loc[df['district'] == 0, 'district'] = 1\n df.index = ['{0}_{1:02d}_{2}'.format(row['state'], row['district'], row\n ['year']) for _, row in df.iterrows()]\n return df\n\n\ndef fetch_index(df, df2, save=False, load=False):\n \"\"\"Helper function for generating/loading master index for syncing between data sources.\n arguments:\n df -- dataframe to parse index from, MUST CONTAIN FULL COPIES OF THE 'district', 'state_po', 'year' COLUMNS (pandas.dataframe)\n returns:\n dataframe with master index for syncing between data sources.\n \"\"\"\n if not load:\n tmp1 = df[['district', 'state', 'year']]\n tmp2 = df2[['district', 'state', 'year']]\n master_index = pd.concat([tmp1, tmp2])\n if save:\n pickle.dump(master_index, open('Datasets/master_index.p', 'wb'))\n return master_index\n else:\n master_index = pickle.load(open('Datasets/master_index.p', 'rb'))\n return master_index\n\n\ndef fetch_trimmed_data(df1, df2, minYear=2012):\n \"\"\"Compile training data. 
Additional cleaning and processing to generate additional features.\n arguments:\n df1 -- dataframe to compile training data from, should be loaded through load_data() and cleaned with clean_index()\n df2 -- dataframe with 2nd place candidates for each race\n minYear -- only records for all years from and after the min year will be kept (int)\n returns:\n dataframe containing training data.\n \"\"\"\n df1 = df1[['district', 'state', 'year', 'party', 'candidatevotes',\n 'totalvotes', 'candidate']]\n df2 = df2[['district', 'state', 'year', 'party', 'candidatevotes',\n 'totalvotes', 'candidate']]\n df1.loc[df1['party'] == 'democratic-farmer-labor', 'party'] = 'democrat'\n df1.loc[df1['party'] == 'tax revolt', 'party'] = 'republican'\n df1.loc[df1['party'] == 'independent', 'party'] = 'democrat'\n df1.loc[df1['party'] == 'reform', 'party'] = 'republican'\n df1.loc[df1['party'] == 'republican/democrat', 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 2.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 3.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 4.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 2.0), 'party'] = 'democrat'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 3.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 4.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 5.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 6.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 2.0), 'party'] = 'democrat'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 3.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 4.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'ND') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'WY') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'CO') & (df1[\n 'district'] == 6.0), 'party'] = 'republican'\n df2.loc[df2['party'] == 'democratic-farmer-labor', 'party'] = 'democrat'\n df2.loc[df2['party'] == 'tax revolt', 'party'] = 'republican'\n df2.loc[df2['party'] == 'independent', 'party'] = 'democrat'\n df2.loc[df2['party'] == 'reform', 'party'] = 'republican'\n df2.loc[df2['party'] == 'republican/democrat', 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 2.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 3.0), 'party'] = 'republican'\n 
df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 4.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 2.0), 'party'] = 'democrat'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 3.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 4.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 5.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 6.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 2.0), 'party'] = 'democrat'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 3.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 4.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'ND') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'WY') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'CO') & (df2[\n 'district'] == 6.0), 'party'] = 'republican'\n poll = pickle.load(open('Datasets/national_poll.p', 'rb'))\n for year in range(minYear, int(max(df1['year'].values)) + 1, 2):\n for index_t, row in df1.iterrows():\n if row['year'] == year:\n index_tm2 = index_t.replace(str(year), str(year - 2))\n if index_tm2 in df1.index:\n poll_t = poll.loc[poll.index == index_t, 'national_poll'\n ].values[0]\n poll_tm2 = poll.loc[poll.index == index_tm2,\n 'national_poll'].values[0]\n df1.loc[df1.index == index_t, 'national_poll'] = poll_t\n df1.loc[df1.index == index_t, 'national_poll_prev'\n ] = poll_tm2\n df1.loc[df1.index == index_t,\n 'national_poll_delta_subtract'] = poll_t - poll_tm2\n df1.loc[df1.index == index_t, 'national_poll_delta_divide'\n ] = poll_t / poll_tm2\n df1.loc[df1.index == index_t, 'previous_party'] = df1.loc[\n df1.index == index_tm2, 'party'].values[0]\n winner_totalvotes = df1.loc[df1.index == index_tm2,\n 'totalvotes'].values[0]\n loser_totalvotes = df2.loc[df2.index == index_tm2,\n 'totalvotes'].values[0]\n if winner_totalvotes == 0:\n winner_margin = 1\n else:\n winner_margin = df1.loc[df1.index == index_tm2,\n 'candidatevotes'].values[0] / winner_totalvotes\n if loser_totalvotes == 0:\n loser_margin = 1\n else:\n loser_margin = df2.loc[df2.index == index_tm2,\n 'candidatevotes'].values[0] / loser_totalvotes\n if winner_margin == loser_margin:\n loser_margin = 1e-10\n else:\n loser_margin = df2.loc[df2.index == index_tm2,\n 'candidatevotes'].values[0] / df2.loc[df2.index ==\n index_tm2, 'totalvotes'].values[0]\n label_dem = 'dem_win_margin_prev'\n label_rep = 'rep_win_margin_prev'\n label_sm = 'margin_signed_minus_prev'\n label_um = 'margin_unsigned_minus_prev'\n label_sd = 'margin_signed_divide_prev'\n label_ud = 'margin_unsigned_divide_prev'\n if df1.loc[df1.index == index_tm2, 'party'].values[0\n ] == 'democrat':\n df1.loc[df1.index == index_t, label_dem\n ] = winner_margin\n df1.loc[df1.index == index_t, label_rep] = loser_margin\n df1.loc[df1.index == 
index_t, label_sm\n ] = winner_margin - loser_margin\n if loser_margin != 0:\n df1.loc[df1.index == index_t, label_sd\n ] = winner_margin / loser_margin\n else:\n df1.loc[df1.index == index_t, label_sd\n ] = winner_margin / 1e-10\n elif df1.loc[df1.index == index_tm2, 'party'].values[0\n ] == 'republican':\n df1.loc[df1.index == index_t, label_dem] = loser_margin\n df1.loc[df1.index == index_t, label_rep\n ] = winner_margin\n df1.loc[df1.index == index_t, label_sm\n ] = loser_margin - winner_margin\n if winner_margin != 0:\n df1.loc[df1.index == index_t, label_sd\n ] = loser_margin / winner_margin\n else:\n df1.loc[df1.index == index_t, label_sd\n ] = loser_margin / 1e-10\n df1.loc[df1.index == index_t, label_um\n ] = winner_margin - loser_margin\n if loser_margin != 0:\n df1.loc[df1.index == index_t, label_ud\n ] = winner_margin / loser_margin\n else:\n df1.loc[df1.index == index_t, label_ud\n ] = winner_margin / 1e-10\n else:\n df1 = df1[df1.index != index_t]\n df1 = df1[df1['year'] != minYear - 2]\n df1.loc[df1['previous_party'] == 'democrat', 'dem_win_prev'] = 1\n df1.loc[df1['previous_party'] != 'democrat', 'dem_win_prev'] = 0\n df1.loc[df1['previous_party'] == 'republican', 'rep_win_prev'] = 1\n df1.loc[df1['previous_party'] != 'republican', 'rep_win_prev'] = 0\n df1.loc[df1['party'] == 'democrat', 'dem_win'] = 1\n df1.loc[df1['party'] != 'democrat', 'dem_win'] = 0\n df1.loc[df1['party'] == 'republican', 'rep_win'] = 1\n df1.loc[df1['party'] != 'republican', 'rep_win'] = 0\n return df1\n",
"<import token>\n<docstring token>\n\n\ndef load_data(relFilePath, minYear=2010):\n \"\"\" Keep only the winner and 2nd place candidates within each state's district for every year.\n arguments:\n relFilePath -- path to the data file (csv)\n minYear -- only records for all years from and after the min year will be kept (int)\n returns:\n dataframe with only the winners (pandas.dataframe)\n dataframe with only the 2nd place candidates (pandas.dataframe)\n \"\"\"\n data_df = pd.read_csv(relFilePath)\n winners_df = pd.DataFrame()\n winners2_df = pd.DataFrame()\n for key, shard in data_df.groupby(['year', 'state_po', 'district']):\n if int(key[0]) >= minYear:\n winners_df = winners_df.append(shard.loc[shard['candidatevotes'\n ].idxmax()])\n sortedIndices = shard['candidatevotes'].values.argsort()[::-1]\n if len(sortedIndices) > 1:\n winners2_df = winners2_df.append(shard.iloc[sortedIndices[1]])\n else:\n winners2_df = winners2_df.append(shard.iloc[sortedIndices[0]])\n return winners_df, winners2_df\n\n\ndef clean_index(df, clean_before_build=True):\n \"\"\"Performs general clean up tasks on the key columns. Generates the master key.\n arguments:\n df -- dataframe to clean up, should contain the columns 'district', 'state_po' and 'year' (pandas.dataframe)\n returns:\n dataframe with cleaned key columns and index (pandas.dataframe)\n \"\"\"\n if clean_before_build:\n df = df.reset_index().drop(['index', 'state'], axis=1)\n df = df.rename(columns={'state_po': 'state'})\n df = df.astype({'year': int, 'district': int})\n df.loc[df['district'] == 0, 'district'] = 1\n df.index = ['{0}_{1:02d}_{2}'.format(row['state'], row['district'], row\n ['year']) for _, row in df.iterrows()]\n return df\n\n\ndef fetch_index(df, df2, save=False, load=False):\n \"\"\"Helper function for generating/loading master index for syncing between data sources.\n arguments:\n df -- dataframe to parse index from, MUST CONTAIN FULL COPIES OF THE 'district', 'state_po', 'year' COLUMNS (pandas.dataframe)\n returns:\n dataframe with master index for syncing between data sources.\n \"\"\"\n if not load:\n tmp1 = df[['district', 'state', 'year']]\n tmp2 = df2[['district', 'state', 'year']]\n master_index = pd.concat([tmp1, tmp2])\n if save:\n pickle.dump(master_index, open('Datasets/master_index.p', 'wb'))\n return master_index\n else:\n master_index = pickle.load(open('Datasets/master_index.p', 'rb'))\n return master_index\n\n\ndef fetch_trimmed_data(df1, df2, minYear=2012):\n \"\"\"Compile training data. 
Additional cleaning and processing to generate additional features.\n arguments:\n df1 -- dataframe to compile training data from, should be loaded through load_data() and cleaned with clean_index()\n df2 -- dataframe with 2nd place candidates for each race\n minYear -- only records for all years from and after the min year will be kept (int)\n returns:\n dataframe containing training data.\n \"\"\"\n df1 = df1[['district', 'state', 'year', 'party', 'candidatevotes',\n 'totalvotes', 'candidate']]\n df2 = df2[['district', 'state', 'year', 'party', 'candidatevotes',\n 'totalvotes', 'candidate']]\n df1.loc[df1['party'] == 'democratic-farmer-labor', 'party'] = 'democrat'\n df1.loc[df1['party'] == 'tax revolt', 'party'] = 'republican'\n df1.loc[df1['party'] == 'independent', 'party'] = 'democrat'\n df1.loc[df1['party'] == 'reform', 'party'] = 'republican'\n df1.loc[df1['party'] == 'republican/democrat', 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 2.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 3.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 4.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 2.0), 'party'] = 'democrat'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 3.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 4.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 5.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 6.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 2.0), 'party'] = 'democrat'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 3.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 4.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'ND') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'WY') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'CO') & (df1[\n 'district'] == 6.0), 'party'] = 'republican'\n df2.loc[df2['party'] == 'democratic-farmer-labor', 'party'] = 'democrat'\n df2.loc[df2['party'] == 'tax revolt', 'party'] = 'republican'\n df2.loc[df2['party'] == 'independent', 'party'] = 'democrat'\n df2.loc[df2['party'] == 'reform', 'party'] = 'republican'\n df2.loc[df2['party'] == 'republican/democrat', 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 2.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 3.0), 'party'] = 'republican'\n 
df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 4.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 2.0), 'party'] = 'democrat'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 3.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 4.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 5.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 6.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 2.0), 'party'] = 'democrat'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 3.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 4.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'ND') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'WY') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'CO') & (df2[\n 'district'] == 6.0), 'party'] = 'republican'\n poll = pickle.load(open('Datasets/national_poll.p', 'rb'))\n for year in range(minYear, int(max(df1['year'].values)) + 1, 2):\n for index_t, row in df1.iterrows():\n if row['year'] == year:\n index_tm2 = index_t.replace(str(year), str(year - 2))\n if index_tm2 in df1.index:\n poll_t = poll.loc[poll.index == index_t, 'national_poll'\n ].values[0]\n poll_tm2 = poll.loc[poll.index == index_tm2,\n 'national_poll'].values[0]\n df1.loc[df1.index == index_t, 'national_poll'] = poll_t\n df1.loc[df1.index == index_t, 'national_poll_prev'\n ] = poll_tm2\n df1.loc[df1.index == index_t,\n 'national_poll_delta_subtract'] = poll_t - poll_tm2\n df1.loc[df1.index == index_t, 'national_poll_delta_divide'\n ] = poll_t / poll_tm2\n df1.loc[df1.index == index_t, 'previous_party'] = df1.loc[\n df1.index == index_tm2, 'party'].values[0]\n winner_totalvotes = df1.loc[df1.index == index_tm2,\n 'totalvotes'].values[0]\n loser_totalvotes = df2.loc[df2.index == index_tm2,\n 'totalvotes'].values[0]\n if winner_totalvotes == 0:\n winner_margin = 1\n else:\n winner_margin = df1.loc[df1.index == index_tm2,\n 'candidatevotes'].values[0] / winner_totalvotes\n if loser_totalvotes == 0:\n loser_margin = 1\n else:\n loser_margin = df2.loc[df2.index == index_tm2,\n 'candidatevotes'].values[0] / loser_totalvotes\n if winner_margin == loser_margin:\n loser_margin = 1e-10\n else:\n loser_margin = df2.loc[df2.index == index_tm2,\n 'candidatevotes'].values[0] / df2.loc[df2.index ==\n index_tm2, 'totalvotes'].values[0]\n label_dem = 'dem_win_margin_prev'\n label_rep = 'rep_win_margin_prev'\n label_sm = 'margin_signed_minus_prev'\n label_um = 'margin_unsigned_minus_prev'\n label_sd = 'margin_signed_divide_prev'\n label_ud = 'margin_unsigned_divide_prev'\n if df1.loc[df1.index == index_tm2, 'party'].values[0\n ] == 'democrat':\n df1.loc[df1.index == index_t, label_dem\n ] = winner_margin\n df1.loc[df1.index == index_t, label_rep] = loser_margin\n df1.loc[df1.index == 
index_t, label_sm\n ] = winner_margin - loser_margin\n if loser_margin != 0:\n df1.loc[df1.index == index_t, label_sd\n ] = winner_margin / loser_margin\n else:\n df1.loc[df1.index == index_t, label_sd\n ] = winner_margin / 1e-10\n elif df1.loc[df1.index == index_tm2, 'party'].values[0\n ] == 'republican':\n df1.loc[df1.index == index_t, label_dem] = loser_margin\n df1.loc[df1.index == index_t, label_rep\n ] = winner_margin\n df1.loc[df1.index == index_t, label_sm\n ] = loser_margin - winner_margin\n if winner_margin != 0:\n df1.loc[df1.index == index_t, label_sd\n ] = loser_margin / winner_margin\n else:\n df1.loc[df1.index == index_t, label_sd\n ] = loser_margin / 1e-10\n df1.loc[df1.index == index_t, label_um\n ] = winner_margin - loser_margin\n if loser_margin != 0:\n df1.loc[df1.index == index_t, label_ud\n ] = winner_margin / loser_margin\n else:\n df1.loc[df1.index == index_t, label_ud\n ] = winner_margin / 1e-10\n else:\n df1 = df1[df1.index != index_t]\n df1 = df1[df1['year'] != minYear - 2]\n df1.loc[df1['previous_party'] == 'democrat', 'dem_win_prev'] = 1\n df1.loc[df1['previous_party'] != 'democrat', 'dem_win_prev'] = 0\n df1.loc[df1['previous_party'] == 'republican', 'rep_win_prev'] = 1\n df1.loc[df1['previous_party'] != 'republican', 'rep_win_prev'] = 0\n df1.loc[df1['party'] == 'democrat', 'dem_win'] = 1\n df1.loc[df1['party'] != 'democrat', 'dem_win'] = 0\n df1.loc[df1['party'] == 'republican', 'rep_win'] = 1\n df1.loc[df1['party'] != 'republican', 'rep_win'] = 0\n return df1\n",
"<import token>\n<docstring token>\n\n\ndef load_data(relFilePath, minYear=2010):\n \"\"\" Keep only the winner and 2nd place candidates within each state's district for every year.\n arguments:\n relFilePath -- path to the data file (csv)\n minYear -- only records for all years from and after the min year will be kept (int)\n returns:\n dataframe with only the winners (pandas.dataframe)\n dataframe with only the 2nd place candidates (pandas.dataframe)\n \"\"\"\n data_df = pd.read_csv(relFilePath)\n winners_df = pd.DataFrame()\n winners2_df = pd.DataFrame()\n for key, shard in data_df.groupby(['year', 'state_po', 'district']):\n if int(key[0]) >= minYear:\n winners_df = winners_df.append(shard.loc[shard['candidatevotes'\n ].idxmax()])\n sortedIndices = shard['candidatevotes'].values.argsort()[::-1]\n if len(sortedIndices) > 1:\n winners2_df = winners2_df.append(shard.iloc[sortedIndices[1]])\n else:\n winners2_df = winners2_df.append(shard.iloc[sortedIndices[0]])\n return winners_df, winners2_df\n\n\n<function token>\n\n\ndef fetch_index(df, df2, save=False, load=False):\n \"\"\"Helper function for generating/loading master index for syncing between data sources.\n arguments:\n df -- dataframe to parse index from, MUST CONTAIN FULL COPIES OF THE 'district', 'state_po', 'year' COLUMNS (pandas.dataframe)\n returns:\n dataframe with master index for syncing between data sources.\n \"\"\"\n if not load:\n tmp1 = df[['district', 'state', 'year']]\n tmp2 = df2[['district', 'state', 'year']]\n master_index = pd.concat([tmp1, tmp2])\n if save:\n pickle.dump(master_index, open('Datasets/master_index.p', 'wb'))\n return master_index\n else:\n master_index = pickle.load(open('Datasets/master_index.p', 'rb'))\n return master_index\n\n\ndef fetch_trimmed_data(df1, df2, minYear=2012):\n \"\"\"Compile training data. 
Additional cleaning and processing to generate additional features.\n arguments:\n df1 -- dataframe to compile training data from, should be loaded through load_data() and cleaned with clean_index()\n df2 -- dataframe with 2nd place candidates for each race\n minYear -- only records for all years from and after the min year will be kept (int)\n returns:\n dataframe containing training data.\n \"\"\"\n df1 = df1[['district', 'state', 'year', 'party', 'candidatevotes',\n 'totalvotes', 'candidate']]\n df2 = df2[['district', 'state', 'year', 'party', 'candidatevotes',\n 'totalvotes', 'candidate']]\n df1.loc[df1['party'] == 'democratic-farmer-labor', 'party'] = 'democrat'\n df1.loc[df1['party'] == 'tax revolt', 'party'] = 'republican'\n df1.loc[df1['party'] == 'independent', 'party'] = 'democrat'\n df1.loc[df1['party'] == 'reform', 'party'] = 'republican'\n df1.loc[df1['party'] == 'republican/democrat', 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 2.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 3.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 4.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 2.0), 'party'] = 'democrat'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 3.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 4.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 5.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 6.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 2.0), 'party'] = 'democrat'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 3.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 4.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'ND') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'WY') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'CO') & (df1[\n 'district'] == 6.0), 'party'] = 'republican'\n df2.loc[df2['party'] == 'democratic-farmer-labor', 'party'] = 'democrat'\n df2.loc[df2['party'] == 'tax revolt', 'party'] = 'republican'\n df2.loc[df2['party'] == 'independent', 'party'] = 'democrat'\n df2.loc[df2['party'] == 'reform', 'party'] = 'republican'\n df2.loc[df2['party'] == 'republican/democrat', 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 2.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 3.0), 'party'] = 'republican'\n 
df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 4.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 2.0), 'party'] = 'democrat'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 3.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 4.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 5.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 6.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 2.0), 'party'] = 'democrat'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 3.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 4.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'ND') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'WY') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'CO') & (df2[\n 'district'] == 6.0), 'party'] = 'republican'\n poll = pickle.load(open('Datasets/national_poll.p', 'rb'))\n for year in range(minYear, int(max(df1['year'].values)) + 1, 2):\n for index_t, row in df1.iterrows():\n if row['year'] == year:\n index_tm2 = index_t.replace(str(year), str(year - 2))\n if index_tm2 in df1.index:\n poll_t = poll.loc[poll.index == index_t, 'national_poll'\n ].values[0]\n poll_tm2 = poll.loc[poll.index == index_tm2,\n 'national_poll'].values[0]\n df1.loc[df1.index == index_t, 'national_poll'] = poll_t\n df1.loc[df1.index == index_t, 'national_poll_prev'\n ] = poll_tm2\n df1.loc[df1.index == index_t,\n 'national_poll_delta_subtract'] = poll_t - poll_tm2\n df1.loc[df1.index == index_t, 'national_poll_delta_divide'\n ] = poll_t / poll_tm2\n df1.loc[df1.index == index_t, 'previous_party'] = df1.loc[\n df1.index == index_tm2, 'party'].values[0]\n winner_totalvotes = df1.loc[df1.index == index_tm2,\n 'totalvotes'].values[0]\n loser_totalvotes = df2.loc[df2.index == index_tm2,\n 'totalvotes'].values[0]\n if winner_totalvotes == 0:\n winner_margin = 1\n else:\n winner_margin = df1.loc[df1.index == index_tm2,\n 'candidatevotes'].values[0] / winner_totalvotes\n if loser_totalvotes == 0:\n loser_margin = 1\n else:\n loser_margin = df2.loc[df2.index == index_tm2,\n 'candidatevotes'].values[0] / loser_totalvotes\n if winner_margin == loser_margin:\n loser_margin = 1e-10\n else:\n loser_margin = df2.loc[df2.index == index_tm2,\n 'candidatevotes'].values[0] / df2.loc[df2.index ==\n index_tm2, 'totalvotes'].values[0]\n label_dem = 'dem_win_margin_prev'\n label_rep = 'rep_win_margin_prev'\n label_sm = 'margin_signed_minus_prev'\n label_um = 'margin_unsigned_minus_prev'\n label_sd = 'margin_signed_divide_prev'\n label_ud = 'margin_unsigned_divide_prev'\n if df1.loc[df1.index == index_tm2, 'party'].values[0\n ] == 'democrat':\n df1.loc[df1.index == index_t, label_dem\n ] = winner_margin\n df1.loc[df1.index == index_t, label_rep] = loser_margin\n df1.loc[df1.index == 
index_t, label_sm\n ] = winner_margin - loser_margin\n if loser_margin != 0:\n df1.loc[df1.index == index_t, label_sd\n ] = winner_margin / loser_margin\n else:\n df1.loc[df1.index == index_t, label_sd\n ] = winner_margin / 1e-10\n elif df1.loc[df1.index == index_tm2, 'party'].values[0\n ] == 'republican':\n df1.loc[df1.index == index_t, label_dem] = loser_margin\n df1.loc[df1.index == index_t, label_rep\n ] = winner_margin\n df1.loc[df1.index == index_t, label_sm\n ] = loser_margin - winner_margin\n if winner_margin != 0:\n df1.loc[df1.index == index_t, label_sd\n ] = loser_margin / winner_margin\n else:\n df1.loc[df1.index == index_t, label_sd\n ] = loser_margin / 1e-10\n df1.loc[df1.index == index_t, label_um\n ] = winner_margin - loser_margin\n if loser_margin != 0:\n df1.loc[df1.index == index_t, label_ud\n ] = winner_margin / loser_margin\n else:\n df1.loc[df1.index == index_t, label_ud\n ] = winner_margin / 1e-10\n else:\n df1 = df1[df1.index != index_t]\n df1 = df1[df1['year'] != minYear - 2]\n df1.loc[df1['previous_party'] == 'democrat', 'dem_win_prev'] = 1\n df1.loc[df1['previous_party'] != 'democrat', 'dem_win_prev'] = 0\n df1.loc[df1['previous_party'] == 'republican', 'rep_win_prev'] = 1\n df1.loc[df1['previous_party'] != 'republican', 'rep_win_prev'] = 0\n df1.loc[df1['party'] == 'democrat', 'dem_win'] = 1\n df1.loc[df1['party'] != 'democrat', 'dem_win'] = 0\n df1.loc[df1['party'] == 'republican', 'rep_win'] = 1\n df1.loc[df1['party'] != 'republican', 'rep_win'] = 0\n return df1\n",
"<import token>\n<docstring token>\n\n\ndef load_data(relFilePath, minYear=2010):\n \"\"\" Keep only the winner and 2nd place candidates within each state's district for every year.\n arguments:\n relFilePath -- path to the data file (csv)\n minYear -- only records for all years from and after the min year will be kept (int)\n returns:\n dataframe with only the winners (pandas.dataframe)\n dataframe with only the 2nd place candidates (pandas.dataframe)\n \"\"\"\n data_df = pd.read_csv(relFilePath)\n winners_df = pd.DataFrame()\n winners2_df = pd.DataFrame()\n for key, shard in data_df.groupby(['year', 'state_po', 'district']):\n if int(key[0]) >= minYear:\n winners_df = winners_df.append(shard.loc[shard['candidatevotes'\n ].idxmax()])\n sortedIndices = shard['candidatevotes'].values.argsort()[::-1]\n if len(sortedIndices) > 1:\n winners2_df = winners2_df.append(shard.iloc[sortedIndices[1]])\n else:\n winners2_df = winners2_df.append(shard.iloc[sortedIndices[0]])\n return winners_df, winners2_df\n\n\n<function token>\n<function token>\n\n\ndef fetch_trimmed_data(df1, df2, minYear=2012):\n \"\"\"Compile training data. Additional cleaning and processing to generate additional features.\n arguments:\n df1 -- dataframe to compile training data from, should be loaded through load_data() and cleaned with clean_index()\n df2 -- dataframe with 2nd place candidates for each race\n minYear -- only records for all years from and after the min year will be kept (int)\n returns:\n dataframe containing training data.\n \"\"\"\n df1 = df1[['district', 'state', 'year', 'party', 'candidatevotes',\n 'totalvotes', 'candidate']]\n df2 = df2[['district', 'state', 'year', 'party', 'candidatevotes',\n 'totalvotes', 'candidate']]\n df1.loc[df1['party'] == 'democratic-farmer-labor', 'party'] = 'democrat'\n df1.loc[df1['party'] == 'tax revolt', 'party'] = 'republican'\n df1.loc[df1['party'] == 'independent', 'party'] = 'democrat'\n df1.loc[df1['party'] == 'reform', 'party'] = 'republican'\n df1.loc[df1['party'] == 'republican/democrat', 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 2.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 3.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'KS') & (df1[\n 'district'] == 4.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 2.0), 'party'] = 'democrat'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 3.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 4.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 5.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'LA') & (df1[\n 'district'] == 6.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 2.0), 'party'] = 'democrat'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 3.0), 'party'] = 'republican'\n 
df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'MS') & (df1[\n 'district'] == 4.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'ND') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'WY') & (df1[\n 'district'] == 1.0), 'party'] = 'republican'\n df1.loc[pd.isnull(df1['party']) & (df1['state'] == 'CO') & (df1[\n 'district'] == 6.0), 'party'] = 'republican'\n df2.loc[df2['party'] == 'democratic-farmer-labor', 'party'] = 'democrat'\n df2.loc[df2['party'] == 'tax revolt', 'party'] = 'republican'\n df2.loc[df2['party'] == 'independent', 'party'] = 'democrat'\n df2.loc[df2['party'] == 'reform', 'party'] = 'republican'\n df2.loc[df2['party'] == 'republican/democrat', 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 2.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 3.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'KS') & (df2[\n 'district'] == 4.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 2.0), 'party'] = 'democrat'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 3.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 4.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 5.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'LA') & (df2[\n 'district'] == 6.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 2.0), 'party'] = 'democrat'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 3.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'MS') & (df2[\n 'district'] == 4.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'ND') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'WY') & (df2[\n 'district'] == 1.0), 'party'] = 'republican'\n df2.loc[pd.isnull(df2['party']) & (df2['state'] == 'CO') & (df2[\n 'district'] == 6.0), 'party'] = 'republican'\n poll = pickle.load(open('Datasets/national_poll.p', 'rb'))\n for year in range(minYear, int(max(df1['year'].values)) + 1, 2):\n for index_t, row in df1.iterrows():\n if row['year'] == year:\n index_tm2 = index_t.replace(str(year), str(year - 2))\n if index_tm2 in df1.index:\n poll_t = poll.loc[poll.index == index_t, 'national_poll'\n ].values[0]\n poll_tm2 = poll.loc[poll.index == index_tm2,\n 'national_poll'].values[0]\n df1.loc[df1.index == index_t, 'national_poll'] = poll_t\n df1.loc[df1.index == index_t, 'national_poll_prev'\n ] = poll_tm2\n df1.loc[df1.index == index_t,\n 'national_poll_delta_subtract'] = poll_t - poll_tm2\n df1.loc[df1.index == index_t, 'national_poll_delta_divide'\n ] = poll_t / poll_tm2\n df1.loc[df1.index == index_t, 'previous_party'] = df1.loc[\n df1.index == index_tm2, 'party'].values[0]\n 
winner_totalvotes = df1.loc[df1.index == index_tm2,\n 'totalvotes'].values[0]\n loser_totalvotes = df2.loc[df2.index == index_tm2,\n 'totalvotes'].values[0]\n if winner_totalvotes == 0:\n winner_margin = 1\n else:\n winner_margin = df1.loc[df1.index == index_tm2,\n 'candidatevotes'].values[0] / winner_totalvotes\n if loser_totalvotes == 0:\n loser_margin = 1\n else:\n loser_margin = df2.loc[df2.index == index_tm2,\n 'candidatevotes'].values[0] / loser_totalvotes\n if winner_margin == loser_margin:\n loser_margin = 1e-10\n else:\n loser_margin = df2.loc[df2.index == index_tm2,\n 'candidatevotes'].values[0] / df2.loc[df2.index ==\n index_tm2, 'totalvotes'].values[0]\n label_dem = 'dem_win_margin_prev'\n label_rep = 'rep_win_margin_prev'\n label_sm = 'margin_signed_minus_prev'\n label_um = 'margin_unsigned_minus_prev'\n label_sd = 'margin_signed_divide_prev'\n label_ud = 'margin_unsigned_divide_prev'\n if df1.loc[df1.index == index_tm2, 'party'].values[0\n ] == 'democrat':\n df1.loc[df1.index == index_t, label_dem\n ] = winner_margin\n df1.loc[df1.index == index_t, label_rep] = loser_margin\n df1.loc[df1.index == index_t, label_sm\n ] = winner_margin - loser_margin\n if loser_margin != 0:\n df1.loc[df1.index == index_t, label_sd\n ] = winner_margin / loser_margin\n else:\n df1.loc[df1.index == index_t, label_sd\n ] = winner_margin / 1e-10\n elif df1.loc[df1.index == index_tm2, 'party'].values[0\n ] == 'republican':\n df1.loc[df1.index == index_t, label_dem] = loser_margin\n df1.loc[df1.index == index_t, label_rep\n ] = winner_margin\n df1.loc[df1.index == index_t, label_sm\n ] = loser_margin - winner_margin\n if winner_margin != 0:\n df1.loc[df1.index == index_t, label_sd\n ] = loser_margin / winner_margin\n else:\n df1.loc[df1.index == index_t, label_sd\n ] = loser_margin / 1e-10\n df1.loc[df1.index == index_t, label_um\n ] = winner_margin - loser_margin\n if loser_margin != 0:\n df1.loc[df1.index == index_t, label_ud\n ] = winner_margin / loser_margin\n else:\n df1.loc[df1.index == index_t, label_ud\n ] = winner_margin / 1e-10\n else:\n df1 = df1[df1.index != index_t]\n df1 = df1[df1['year'] != minYear - 2]\n df1.loc[df1['previous_party'] == 'democrat', 'dem_win_prev'] = 1\n df1.loc[df1['previous_party'] != 'democrat', 'dem_win_prev'] = 0\n df1.loc[df1['previous_party'] == 'republican', 'rep_win_prev'] = 1\n df1.loc[df1['previous_party'] != 'republican', 'rep_win_prev'] = 0\n df1.loc[df1['party'] == 'democrat', 'dem_win'] = 1\n df1.loc[df1['party'] != 'democrat', 'dem_win'] = 0\n df1.loc[df1['party'] == 'republican', 'rep_win'] = 1\n df1.loc[df1['party'] != 'republican', 'rep_win'] = 0\n return df1\n",
"<import token>\n<docstring token>\n\n\ndef load_data(relFilePath, minYear=2010):\n \"\"\" Keep only the winner and 2nd place candidates within each state's district for every year.\n arguments:\n relFilePath -- path to the data file (csv)\n minYear -- only records for all years from and after the min year will be kept (int)\n returns:\n dataframe with only the winners (pandas.dataframe)\n dataframe with only the 2nd place candidates (pandas.dataframe)\n \"\"\"\n data_df = pd.read_csv(relFilePath)\n winners_df = pd.DataFrame()\n winners2_df = pd.DataFrame()\n for key, shard in data_df.groupby(['year', 'state_po', 'district']):\n if int(key[0]) >= minYear:\n winners_df = winners_df.append(shard.loc[shard['candidatevotes'\n ].idxmax()])\n sortedIndices = shard['candidatevotes'].values.argsort()[::-1]\n if len(sortedIndices) > 1:\n winners2_df = winners2_df.append(shard.iloc[sortedIndices[1]])\n else:\n winners2_df = winners2_df.append(shard.iloc[sortedIndices[0]])\n return winners_df, winners2_df\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<docstring token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,575 |
20baf2925451fdcaeb74e96728952070dd85e709
|
import requests
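# Ask the GitHub releases API for the newest Ghost release and print its tag name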
latest_release = requests.get('https://api.github.com/repos/TryGhost/Ghost/releases/latest').json()
latest_version = latest_release['tag_name']
print(latest_version)
|
[
"import requests\n\nlatest_release = requests.get('https://api.github.com/repos/TryGhost/Ghost/releases/latest').json()\nlatest_version = latest_release['tag_name']\n\nprint(latest_version)\n",
"import requests\nlatest_release = requests.get(\n 'https://api.github.com/repos/TryGhost/Ghost/releases/latest').json()\nlatest_version = latest_release['tag_name']\nprint(latest_version)\n",
"<import token>\nlatest_release = requests.get(\n 'https://api.github.com/repos/TryGhost/Ghost/releases/latest').json()\nlatest_version = latest_release['tag_name']\nprint(latest_version)\n",
"<import token>\n<assignment token>\nprint(latest_version)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
99,576 |
989eaf806111c5cd1b3b7208fa4e32a6df6ffdd0
|
import re
import pprint
m = {}
ggg = []
s = set()
wp = []
conf = '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent"'
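# Build a regex from the nginx log_format string: each $var becomes a named capture group, other characters are escaped literals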
regex = ''.join(
'(?P<' + g + '>.*?)' if g else re.escape(c)
for g, c in re.findall(r'\$(\w+)|(.)', conf))
with open('log.txt', 'r') as log_file:
for line in log_file.readlines():
m = re.match(regex, line)
ggg.append(m.groupdict())
for i in ggg:
s.add(i['status'])
for i in ggg:
if i['status'] == '403':
wp.append(i)
print(s)
pprint.pprint(wp)
|
[
"import re\r\nimport pprint\r\n\r\nm = {}\r\nggg = []\r\ns = set()\r\nwp = []\r\n\r\nconf = '$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent \"$http_referer\" \"$http_user_agent\"'\r\nregex = ''.join(\r\n '(?P<' + g + '>.*?)' if g else re.escape(c)\r\n for g, c in re.findall(r'\\$(\\w+)|(.)', conf))\r\n\r\nwith open('log.txt', 'r') as log_file:\r\n for line in log_file.readlines():\r\n m = re.match(regex, line)\r\n ggg.append(m.groupdict())\r\n\r\nfor i in ggg:\r\n s.add(i['status'])\r\n\r\nfor i in ggg:\r\n if i['status'] == '403':\r\n wp.append(i)\r\n\r\n\r\nprint (s)\r\npprint.pprint(wp)",
"import re\nimport pprint\nm = {}\nggg = []\ns = set()\nwp = []\nconf = (\n '$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent \"$http_referer\" \"$http_user_agent\"'\n )\nregex = ''.join('(?P<' + g + '>.*?)' if g else re.escape(c) for g, c in re.\n findall('\\\\$(\\\\w+)|(.)', conf))\nwith open('log.txt', 'r') as log_file:\n for line in log_file.readlines():\n m = re.match(regex, line)\n ggg.append(m.groupdict())\nfor i in ggg:\n s.add(i['status'])\nfor i in ggg:\n if i['status'] == '403':\n wp.append(i)\nprint(s)\npprint.pprint(wp)\n",
"<import token>\nm = {}\nggg = []\ns = set()\nwp = []\nconf = (\n '$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent \"$http_referer\" \"$http_user_agent\"'\n )\nregex = ''.join('(?P<' + g + '>.*?)' if g else re.escape(c) for g, c in re.\n findall('\\\\$(\\\\w+)|(.)', conf))\nwith open('log.txt', 'r') as log_file:\n for line in log_file.readlines():\n m = re.match(regex, line)\n ggg.append(m.groupdict())\nfor i in ggg:\n s.add(i['status'])\nfor i in ggg:\n if i['status'] == '403':\n wp.append(i)\nprint(s)\npprint.pprint(wp)\n",
"<import token>\n<assignment token>\nwith open('log.txt', 'r') as log_file:\n for line in log_file.readlines():\n m = re.match(regex, line)\n ggg.append(m.groupdict())\nfor i in ggg:\n s.add(i['status'])\nfor i in ggg:\n if i['status'] == '403':\n wp.append(i)\nprint(s)\npprint.pprint(wp)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
99,577 |
0e16d587a7eea2845145a1bf97d90795b7aff08c
|
from oauth import *
class FavoriteListener(StreamListener):
def on_status(self, status):
        # Read the watched screen names with a context manager so the file is closed promptly
        with open(os.path.abspath(os.path.dirname(__file__)) + '/../data/id.txt', 'r') as f:
            userList = f.readlines()
        for i in range(len(userList)):
            userList[i] = userList[i].replace('\n', '')
        if status.author.screen_name in userList:
            API(getOauth()).create_favorite(status.id)
return True
def on_error(self, status_code):
print('Got an error with status code: ' + str(status_code))
return True
def on_timeout(self):
print('Timeout...')
return True
def main():
listener = FavoriteListener()
stream = Stream(getOauth(), listener)
stream.userstream()
if __name__ == '__main__':
main()
|
[
"from oauth import *\n\nclass FavoriteListener(StreamListener):\n\n def on_status(self, status):\n f = open(os.path.abspath(os.path.dirname(__file__)) + '/../data/id.txt', 'r')\n userList = f.readlines()\n for i in range(len(userList)):\n userList[i] = userList[i].replace('\\n', '')\n \n if(status.author.screen_name in userList):\n API(getOauth()).create_favorite(status.id)\n return True\n\n def on_error(self, status_code):\n print('Got an error with status code: ' + str(status_code))\n return True\n\n def on_timeout(self):\n print('Timeout...')\n return True\n\ndef main():\n listener = FavoriteListener()\n stream = Stream(getOauth(), listener)\n stream.userstream()\n\nif __name__ == '__main__':\n main()\n",
"from oauth import *\n\n\nclass FavoriteListener(StreamListener):\n\n def on_status(self, status):\n f = open(os.path.abspath(os.path.dirname(__file__)) +\n '/../data/id.txt', 'r')\n userList = f.readlines()\n for i in range(len(userList)):\n userList[i] = userList[i].replace('\\n', '')\n if status.author.screen_name in userList:\n API(getOauth()).create_favorite(status.id)\n return True\n\n def on_error(self, status_code):\n print('Got an error with status code: ' + str(status_code))\n return True\n\n def on_timeout(self):\n print('Timeout...')\n return True\n\n\ndef main():\n listener = FavoriteListener()\n stream = Stream(getOauth(), listener)\n stream.userstream()\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\nclass FavoriteListener(StreamListener):\n\n def on_status(self, status):\n f = open(os.path.abspath(os.path.dirname(__file__)) +\n '/../data/id.txt', 'r')\n userList = f.readlines()\n for i in range(len(userList)):\n userList[i] = userList[i].replace('\\n', '')\n if status.author.screen_name in userList:\n API(getOauth()).create_favorite(status.id)\n return True\n\n def on_error(self, status_code):\n print('Got an error with status code: ' + str(status_code))\n return True\n\n def on_timeout(self):\n print('Timeout...')\n return True\n\n\ndef main():\n listener = FavoriteListener()\n stream = Stream(getOauth(), listener)\n stream.userstream()\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\nclass FavoriteListener(StreamListener):\n\n def on_status(self, status):\n f = open(os.path.abspath(os.path.dirname(__file__)) +\n '/../data/id.txt', 'r')\n userList = f.readlines()\n for i in range(len(userList)):\n userList[i] = userList[i].replace('\\n', '')\n if status.author.screen_name in userList:\n API(getOauth()).create_favorite(status.id)\n return True\n\n def on_error(self, status_code):\n print('Got an error with status code: ' + str(status_code))\n return True\n\n def on_timeout(self):\n print('Timeout...')\n return True\n\n\ndef main():\n listener = FavoriteListener()\n stream = Stream(getOauth(), listener)\n stream.userstream()\n\n\n<code token>\n",
"<import token>\n\n\nclass FavoriteListener(StreamListener):\n\n def on_status(self, status):\n f = open(os.path.abspath(os.path.dirname(__file__)) +\n '/../data/id.txt', 'r')\n userList = f.readlines()\n for i in range(len(userList)):\n userList[i] = userList[i].replace('\\n', '')\n if status.author.screen_name in userList:\n API(getOauth()).create_favorite(status.id)\n return True\n\n def on_error(self, status_code):\n print('Got an error with status code: ' + str(status_code))\n return True\n\n def on_timeout(self):\n print('Timeout...')\n return True\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass FavoriteListener(StreamListener):\n\n def on_status(self, status):\n f = open(os.path.abspath(os.path.dirname(__file__)) +\n '/../data/id.txt', 'r')\n userList = f.readlines()\n for i in range(len(userList)):\n userList[i] = userList[i].replace('\\n', '')\n if status.author.screen_name in userList:\n API(getOauth()).create_favorite(status.id)\n return True\n\n def on_error(self, status_code):\n print('Got an error with status code: ' + str(status_code))\n return True\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass FavoriteListener(StreamListener):\n\n def on_status(self, status):\n f = open(os.path.abspath(os.path.dirname(__file__)) +\n '/../data/id.txt', 'r')\n userList = f.readlines()\n for i in range(len(userList)):\n userList[i] = userList[i].replace('\\n', '')\n if status.author.screen_name in userList:\n API(getOauth()).create_favorite(status.id)\n return True\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass FavoriteListener(StreamListener):\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<class token>\n<function token>\n<code token>\n"
] | false |
99,578 |
13558a4c9677778248ba35096283a0a5fd24e7ff
|
# coding=utf8
''' hackerrank'''
''' python version 2.7 '''
'''
@author: Michael Wan
@since: 2014-12-31
@requires: xhtml2pdf https://pypi.python.org/pypi/xhtml2pdf/, reportlab, html5lib, PyPDF(optional)
'''
from newspdf.xhtml2pdf import pisa
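# Render test.html to test.pdf with pisa, then open the result in the default viewer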
data = open('test.html').read()
result = open('test.pdf', 'wb')  # open() rather than the legacy file() builtin
pdf = pisa.CreatePDF(data, result)
result.close()
pisa.startViewer('test.pdf')
|
[
"# coding=utf8\r\n\r\n''' hackerrank'''\r\n''' python version 2.7 '''\r\n'''\r\n@author: Michael Wan\r\n@since: 2014-12-31\r\n@requires: xhtml2pdf https://pypi.python.org/pypi/xhtml2pdf/, reportlab, html5lib, PyPDF(optional)\r\n'''\r\n\r\nfrom newspdf.xhtml2pdf import pisa\r\n\r\n\r\ndata = open('test.html').read()\r\nresult = file('test.pdf','wb')\r\npdf = pisa.CreatePDF(data, result)\r\nresult.close()\r\npisa.startViewer('test.pdf')",
"<docstring token>\nfrom newspdf.xhtml2pdf import pisa\ndata = open('test.html').read()\nresult = file('test.pdf', 'wb')\npdf = pisa.CreatePDF(data, result)\nresult.close()\npisa.startViewer('test.pdf')\n",
"<docstring token>\n<import token>\ndata = open('test.html').read()\nresult = file('test.pdf', 'wb')\npdf = pisa.CreatePDF(data, result)\nresult.close()\npisa.startViewer('test.pdf')\n",
"<docstring token>\n<import token>\n<assignment token>\nresult.close()\npisa.startViewer('test.pdf')\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
99,579 |
e58b8c711543684b756877fba4dcefc5b54304b8
|
n = int(input())
l = [int(i) for i in input().split()]
for i in l[:]:  # iterate over a copy so removing from l cannot disturb the traversal
    if i == 0:
        l.remove(i)
        l.append(i)
print(*l)
|
[
"n=int(input())\nl=[int(i) for i in input().split()]\nfor i in l:\n\tif i==0:\n\t\tl.remove(i)\n\t\tl.append(i)\nprint(*l)\n",
"n = int(input())\nl = [int(i) for i in input().split()]\nfor i in l:\n if i == 0:\n l.remove(i)\n l.append(i)\nprint(*l)\n",
"<assignment token>\nfor i in l:\n if i == 0:\n l.remove(i)\n l.append(i)\nprint(*l)\n",
"<assignment token>\n<code token>\n"
] | false |
99,580 |
cea22242cb3ff9d26204be009c0e07a1666efc91
|
from unittest import TestCase
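# Minimal unittest example: setUp/tearDown hooks plus two trivial assertions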
class simpleTest(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testExample(self):
self.assertEqual(1, 1)
def testOther(self):
self.assertNotEqual(0, 1)
if '__main__' == __name__:
import unittest
unittest.main()
# vim: set ts=4 sw=4 expandtab enc=utf-8 :
|
[
"\nfrom unittest import TestCase\n\nclass simpleTest(TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testExample(self):\n self.assertEqual(1, 1)\n\n def testOther(self):\n self.assertNotEqual(0, 1)\n\nif '__main__' == __name__:\n import unittest\n unittest.main()\n\n# vim: set ts=4 sw=4 expandtab enc=utf-8 :\n\n",
"from unittest import TestCase\n\n\nclass simpleTest(TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testExample(self):\n self.assertEqual(1, 1)\n\n def testOther(self):\n self.assertNotEqual(0, 1)\n\n\nif '__main__' == __name__:\n import unittest\n unittest.main()\n",
"<import token>\n\n\nclass simpleTest(TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testExample(self):\n self.assertEqual(1, 1)\n\n def testOther(self):\n self.assertNotEqual(0, 1)\n\n\nif '__main__' == __name__:\n import unittest\n unittest.main()\n",
"<import token>\n\n\nclass simpleTest(TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testExample(self):\n self.assertEqual(1, 1)\n\n def testOther(self):\n self.assertNotEqual(0, 1)\n\n\n<code token>\n",
"<import token>\n\n\nclass simpleTest(TestCase):\n\n def setUp(self):\n pass\n <function token>\n\n def testExample(self):\n self.assertEqual(1, 1)\n\n def testOther(self):\n self.assertNotEqual(0, 1)\n\n\n<code token>\n",
"<import token>\n\n\nclass simpleTest(TestCase):\n <function token>\n <function token>\n\n def testExample(self):\n self.assertEqual(1, 1)\n\n def testOther(self):\n self.assertNotEqual(0, 1)\n\n\n<code token>\n",
"<import token>\n\n\nclass simpleTest(TestCase):\n <function token>\n <function token>\n\n def testExample(self):\n self.assertEqual(1, 1)\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass simpleTest(TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
99,581 |
ff770bca22acd53dd310716c518c82087482d025
|
# -*- coding: utf-8 -*-
import numpy as np
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers.core import Activation,Dense,Dropout
from keras.layers import Conv2D,MaxPooling2D,Flatten
from keras.optimizers import SGD,Adam
from keras.datasets import mnist
from keras.utils import np_utils
from keras import initializers
from keras.utils.vis_utils import plot_model
# def init_weights(shape, name=None):  # TODO: decide whether custom initialization is needed
#     return initializers.normal(shape, scale=0.01, name=name)
#Using TensorFlow backend.
def load_data():
    # Load the MNIST data
(x_train,y_train),(x_test,y_test)=mnist.load_data()
print('X_train original shape:', x_train.shape)
# plt.imshow(x_train[0])
number=10000
    # Preprocess: keep the first 10000 samples and flatten each 28x28 image into a 784-vector
x_train=x_train[0:number]
y_train=y_train[0:number]
x_train=x_train.reshape(number,28*28)
x_test=x_test.reshape(x_test.shape[0],28*28)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
print('X_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train=np_utils.to_categorical(y_train,10)
y_test=np_utils.to_categorical(y_test,10)
    # Scale pixel values from [0, 255] to [0, 1]
    x_train = x_train / 255
    x_test = x_test / 255
return (x_train,y_train),(x_test,y_test)
(x_train,y_train),(x_test,y_test)=load_data()
# Build the model: a 784-500-500-10 fully connected network
model=Sequential()
model.add(Dense(input_dim=28*28,units=500,activation='relu'))
# model.add(Dense(input_dim=28*28,output_dim=500))  # input layer takes the 28*28 image; first hidden layer has 500 neurons
# model.add(Activation('sigmoid'))  # activation function
model.add(Dense(units=500,activation='relu'))
# model.add(Dense(output_dim=500))  # second hidden layer, 500 neurons
# model.add(Activation('sigmoid'))
model.add(Dense(units=10,activation='softmax'))
# model.add(Dense(output_dim=10))  # final output layer, 10 classes
# model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',  # loss used to train and score the model
              optimizer='adam', metrics=['accuracy'])  # optimizer
model.fit(x_train, y_train, batch_size=16, nb_epoch=20)  # images and labels; 16 examples per batch, 20 passes over the data
#testing data
score=model.evaluate(x_test,y_test)
print('Total loss on Testing Set:',score[0])
print('Accuracy of Testing Set: ',score[1])
result=model.predict(x_test)
|
[
"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers.core import Activation,Dense,Dropout\nfrom keras.layers import Conv2D,MaxPooling2D,Flatten\nfrom keras.optimizers import SGD,Adam\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom keras import initializers\nfrom keras.utils.vis_utils import plot_model\n# def init_weights(shape,name=None):#是否要做\n# return initializers.normal(shape,scale=0.01,name=name)\n#Using TensorFlow backend.\ndef load_data():\n #载入数据\n (x_train,y_train),(x_test,y_test)=mnist.load_data()\n print('X_train original shape:', x_train.shape)\n # plt.imshow(x_train[0])\n number=10000\n #数据处理\n x_train=x_train[0:number]\n y_train=y_train[0:number]\n x_train=x_train.reshape(number,28*28)\n x_test=x_test.reshape(x_test.shape[0],28*28)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n print('X_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n y_train=np_utils.to_categorical(y_train,10)\n y_test=np_utils.to_categorical(y_test,10)\n x_train=x_train\n x_test=x_test\n\n x_train=x_train/255\n x_test=x_test/255\n return (x_train,y_train),(x_test,y_test)\n\n(x_train,y_train),(x_test,y_test)=load_data()\n#建立模型\nmodel=Sequential()\n\nmodel.add(Dense(input_dim=28*28,units=500,activation='relu'))\n# model.add(Dense(input_dim=28*28,output_dim=500)) #输入层28*28,也就是图片,第一个输出层500个神经元\n# model.add(Activation('sigmoid'))#激活函数\n\nmodel.add(Dense(units=500,activation='relu'))\n# model.add(Dense(output_dim=500))#第二个输出层,500个神经元\n# model.add(Activation('sigmoid'))\n\nmodel.add(Dense(units=10,activation='softmax'))\n# model.add(Dense(output_dim=10))#最后输出层,10维\n# model.add(Activation('softmax'))\n\n\nmodel.summary()\n\nmodel.compile(loss='categorical_crossentropy',#损失函数进行评估\n optimizer='adam',metrics=['accuracy'])#优化函数\n\nmodel.fit(x_train,y_train,batch_size=16,nb_epoch=20)#Image,label,100个eample放在batch,每个batch重复20次\n\n#testing data\nscore=model.evaluate(x_test,y_test)\nprint('Total loss on Testing Set:',score[0])\nprint('Accuracy of Testing Set: ',score[1])\n\nresult=model.predict(x_test)\n\n\n\n",
"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers.core import Activation, Dense, Dropout\nfrom keras.layers import Conv2D, MaxPooling2D, Flatten\nfrom keras.optimizers import SGD, Adam\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom keras import initializers\nfrom keras.utils.vis_utils import plot_model\n\n\ndef load_data():\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n print('X_train original shape:', x_train.shape)\n number = 10000\n x_train = x_train[0:number]\n y_train = y_train[0:number]\n x_train = x_train.reshape(number, 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n print('X_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n y_train = np_utils.to_categorical(y_train, 10)\n y_test = np_utils.to_categorical(y_test, 10)\n x_train = x_train\n x_test = x_test\n x_train = x_train / 255\n x_test = x_test / 255\n return (x_train, y_train), (x_test, y_test)\n\n\n(x_train, y_train), (x_test, y_test) = load_data()\nmodel = Sequential()\nmodel.add(Dense(input_dim=28 * 28, units=500, activation='relu'))\nmodel.add(Dense(units=500, activation='relu'))\nmodel.add(Dense(units=10, activation='softmax'))\nmodel.summary()\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\nmodel.fit(x_train, y_train, batch_size=16, nb_epoch=20)\nscore = model.evaluate(x_test, y_test)\nprint('Total loss on Testing Set:', score[0])\nprint('Accuracy of Testing Set: ', score[1])\nresult = model.predict(x_test)\n",
"<import token>\n\n\ndef load_data():\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n print('X_train original shape:', x_train.shape)\n number = 10000\n x_train = x_train[0:number]\n y_train = y_train[0:number]\n x_train = x_train.reshape(number, 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n print('X_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n y_train = np_utils.to_categorical(y_train, 10)\n y_test = np_utils.to_categorical(y_test, 10)\n x_train = x_train\n x_test = x_test\n x_train = x_train / 255\n x_test = x_test / 255\n return (x_train, y_train), (x_test, y_test)\n\n\n(x_train, y_train), (x_test, y_test) = load_data()\nmodel = Sequential()\nmodel.add(Dense(input_dim=28 * 28, units=500, activation='relu'))\nmodel.add(Dense(units=500, activation='relu'))\nmodel.add(Dense(units=10, activation='softmax'))\nmodel.summary()\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\nmodel.fit(x_train, y_train, batch_size=16, nb_epoch=20)\nscore = model.evaluate(x_test, y_test)\nprint('Total loss on Testing Set:', score[0])\nprint('Accuracy of Testing Set: ', score[1])\nresult = model.predict(x_test)\n",
"<import token>\n\n\ndef load_data():\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n print('X_train original shape:', x_train.shape)\n number = 10000\n x_train = x_train[0:number]\n y_train = y_train[0:number]\n x_train = x_train.reshape(number, 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n print('X_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n y_train = np_utils.to_categorical(y_train, 10)\n y_test = np_utils.to_categorical(y_test, 10)\n x_train = x_train\n x_test = x_test\n x_train = x_train / 255\n x_test = x_test / 255\n return (x_train, y_train), (x_test, y_test)\n\n\n<assignment token>\nmodel.add(Dense(input_dim=28 * 28, units=500, activation='relu'))\nmodel.add(Dense(units=500, activation='relu'))\nmodel.add(Dense(units=10, activation='softmax'))\nmodel.summary()\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\nmodel.fit(x_train, y_train, batch_size=16, nb_epoch=20)\n<assignment token>\nprint('Total loss on Testing Set:', score[0])\nprint('Accuracy of Testing Set: ', score[1])\n<assignment token>\n",
"<import token>\n\n\ndef load_data():\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n print('X_train original shape:', x_train.shape)\n number = 10000\n x_train = x_train[0:number]\n y_train = y_train[0:number]\n x_train = x_train.reshape(number, 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n print('X_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n y_train = np_utils.to_categorical(y_train, 10)\n y_test = np_utils.to_categorical(y_test, 10)\n x_train = x_train\n x_test = x_test\n x_train = x_train / 255\n x_test = x_test / 255\n return (x_train, y_train), (x_test, y_test)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n",
"<import token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n"
] | false |
99,582 |
36bc57389ba8dbc00c720071f7807b58393ee3d1
|
#!/usr/bin/python3
import asyncio
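# Two coroutines sleep concurrently; each resolves a Future whose done-callback prints the result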
@asyncio.coroutine
def my_coroutine(future, task_name, seconds_to_sleep=3):
print('{0} my_coroutine sleeping for : {1} seconds'.format(task_name, seconds_to_sleep))
yield from asyncio.sleep(seconds_to_sleep)
future.set_result('{} is finished'.format(task_name))
def got_result(future):
print(future.result())
loop = asyncio.get_event_loop()
future1 = asyncio.Future()
future2 = asyncio.Future()
tasks = [
my_coroutine(future1, 'task1', 4),
my_coroutine(future2, 'task3', 2),
]
future1.add_done_callback(got_result)
future2.add_done_callback(got_result)
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
|
[
"#!/usr/bin/python3\n\nimport asyncio\n\[email protected]\ndef my_coroutine(future, task_name, seconds_to_sleep=3):\n print('{0} my_coroutine sleeping for : {1} seconds'.format(task_name, seconds_to_sleep))\n yield from asyncio.sleep(seconds_to_sleep)\n future.set_result('{} is finished'.format(task_name))\n\n\ndef got_result(future):\n print(future.result())\n\nloop = asyncio.get_event_loop()\nfuture1 = asyncio.Future()\nfuture2 = asyncio.Future()\n\ntasks = [\n my_coroutine(future1, 'task1', 4),\n my_coroutine(future2, 'task3', 2),\n]\n\nfuture1.add_done_callback(got_result)\nfuture2.add_done_callback(got_result)\n\nloop.run_until_complete(asyncio.wait(tasks))\nloop.close()",
"import asyncio\n\n\[email protected]\ndef my_coroutine(future, task_name, seconds_to_sleep=3):\n print('{0} my_coroutine sleeping for : {1} seconds'.format(task_name,\n seconds_to_sleep))\n yield from asyncio.sleep(seconds_to_sleep)\n future.set_result('{} is finished'.format(task_name))\n\n\ndef got_result(future):\n print(future.result())\n\n\nloop = asyncio.get_event_loop()\nfuture1 = asyncio.Future()\nfuture2 = asyncio.Future()\ntasks = [my_coroutine(future1, 'task1', 4), my_coroutine(future2, 'task3', 2)]\nfuture1.add_done_callback(got_result)\nfuture2.add_done_callback(got_result)\nloop.run_until_complete(asyncio.wait(tasks))\nloop.close()\n",
"<import token>\n\n\[email protected]\ndef my_coroutine(future, task_name, seconds_to_sleep=3):\n print('{0} my_coroutine sleeping for : {1} seconds'.format(task_name,\n seconds_to_sleep))\n yield from asyncio.sleep(seconds_to_sleep)\n future.set_result('{} is finished'.format(task_name))\n\n\ndef got_result(future):\n print(future.result())\n\n\nloop = asyncio.get_event_loop()\nfuture1 = asyncio.Future()\nfuture2 = asyncio.Future()\ntasks = [my_coroutine(future1, 'task1', 4), my_coroutine(future2, 'task3', 2)]\nfuture1.add_done_callback(got_result)\nfuture2.add_done_callback(got_result)\nloop.run_until_complete(asyncio.wait(tasks))\nloop.close()\n",
"<import token>\n\n\[email protected]\ndef my_coroutine(future, task_name, seconds_to_sleep=3):\n print('{0} my_coroutine sleeping for : {1} seconds'.format(task_name,\n seconds_to_sleep))\n yield from asyncio.sleep(seconds_to_sleep)\n future.set_result('{} is finished'.format(task_name))\n\n\ndef got_result(future):\n print(future.result())\n\n\n<assignment token>\nfuture1.add_done_callback(got_result)\nfuture2.add_done_callback(got_result)\nloop.run_until_complete(asyncio.wait(tasks))\nloop.close()\n",
"<import token>\n\n\[email protected]\ndef my_coroutine(future, task_name, seconds_to_sleep=3):\n print('{0} my_coroutine sleeping for : {1} seconds'.format(task_name,\n seconds_to_sleep))\n yield from asyncio.sleep(seconds_to_sleep)\n future.set_result('{} is finished'.format(task_name))\n\n\ndef got_result(future):\n print(future.result())\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\[email protected]\ndef my_coroutine(future, task_name, seconds_to_sleep=3):\n print('{0} my_coroutine sleeping for : {1} seconds'.format(task_name,\n seconds_to_sleep))\n yield from asyncio.sleep(seconds_to_sleep)\n future.set_result('{} is finished'.format(task_name))\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
99,583 |
92f89425cded169f90d24fccc60662a365e08b67
|
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import unittest
import numpy as np
from base.SO3 import SO3
from base.utility import test_matrix_equal
default_tol_place = 2
class TestSO3(unittest.TestCase):
def setUp(self):
pass
def test_default_constructor(self):
rot = SO3()
result = test_matrix_equal(rot.get_matrix(), np.eye(3))
self.assertTrue(result, "Default is not identity!")
def test_euler_constructor(self):
roll, pitch, yaw = (0.1, -0.2, 0.3)
rot = SO3.from_euler(roll, pitch, yaw)
self.assertAlmostEqual(rot.get_roll(), roll, places=default_tol_place)
self.assertAlmostEqual(rot.get_pitch(), pitch, places=default_tol_place)
self.assertAlmostEqual(rot.get_yaw(), yaw, places=default_tol_place)
def test_two_directions_constructor_opposite(self):
d_f = np.array([ -9.41427684e-03, -7.26582309e-03, 9.78452150e+00], dtype=np.float)
d_t = np.array([0, 0, -9.81], dtype=np.float)
R = SO3.from_two_directions(d_f, d_t)
sum_error = 0
for (x1, x2) in zip(d_t, R * d_f):
sum_error += np.sqrt((x1 - x2)**2)
d_t_recovered = R * d_f
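        # Parallel vectors have a zero cross product, so |sin(theta)| measures the residual misalignment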
sin_theta = np.cross(d_t, d_t_recovered) / np.linalg.norm(d_t) / np.linalg.norm(d_t_recovered)
sin_theta = np.linalg.norm(sin_theta)
self.assertAlmostEqual(sin_theta, 0, default_tol_place)
# test if determinant is 1
det = np.linalg.det(R.get_matrix())
self.assertAlmostEqual(det, 1, default_tol_place)
def test_inverse(self):
rot = SO3.from_euler(0.1, -0.2, 0.3)
tmp = np.dot(rot.get_matrix(), rot.inverse().get_matrix())
result = test_matrix_equal(tmp, np.eye(3))
self.assertTrue(result, "Inverse not correct!")
def test_exp_ln(self):
rot = SO3()
so3 = rot.ln()
result = test_matrix_equal(so3, np.zeros_like(so3))
self.assertTrue(result, "Log not correct!")
rot = SO3.from_euler(0.1, -0.2, 0.3)
so3 = rot.ln()
result = test_matrix_equal(rot.get_matrix(), SO3.exp(so3).get_matrix())
self.assertTrue(result)
if (__name__ == "__main__"):
unittest.main()
|
[
"import sys\nimport os\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\n\nimport unittest\nimport numpy as np\n\nfrom base.SO3 import SO3\nfrom base.utility import test_matrix_equal\n\ndefault_tol_place = 2\n\n\n\nclass TestSO3(unittest.TestCase):\n def setUp(self):\n pass\n\n def test_default_constructor(self):\n rot = SO3()\n result = test_matrix_equal(rot.get_matrix(), np.eye(3))\n self.assertTrue(result, \"Default is not identity!\")\n\n def test_euler_constructor(self):\n roll, pitch, yaw = (0.1, -0.2, 0.3)\n rot = SO3.from_euler(roll, pitch, yaw)\n\n self.assertAlmostEqual(rot.get_roll(), roll, places=default_tol_place)\n self.assertAlmostEqual(rot.get_pitch(), pitch, places=default_tol_place)\n self.assertAlmostEqual(rot.get_yaw(), yaw, places=default_tol_place)\n\n def test_two_directions_constructor_opposite(self):\n d_f = np.array([ -9.41427684e-03, -7.26582309e-03, 9.78452150e+00], dtype=np.float)\n d_t = np.array([0, 0, -9.81], dtype=np.float)\n\n R = SO3.from_two_directions(d_f, d_t)\n\n sum_error = 0\n for (x1, x2) in zip(d_t, R * d_f):\n sum_error += np.sqrt((x1 - x2)**2)\n\n d_t_recovered = R * d_f\n\n sin_theta = np.cross(d_t, d_t_recovered) / np.linalg.norm(d_t) / np.linalg.norm(d_t_recovered)\n sin_theta = np.linalg.norm(sin_theta)\n self.assertAlmostEqual(sin_theta, 0, default_tol_place)\n\n # test if determinant is 1\n det = np.linalg.det(R.get_matrix())\n self.assertAlmostEqual(det, 1, default_tol_place)\n\n def test_inverse(self):\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n tmp = np.dot(rot.get_matrix(), rot.inverse().get_matrix())\n result = test_matrix_equal(tmp, np.eye(3))\n\n self.assertTrue(result, \"Inverse not correct!\")\n\n def test_exp_ln(self):\n rot = SO3()\n so3 = rot.ln()\n result = test_matrix_equal(so3, np.zeros_like(so3))\n\n self.assertTrue(result, \"Log not correct!\")\n\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n so3 = rot.ln()\n result = test_matrix_equal(rot.get_matrix(), SO3.exp(so3).get_matrix())\n\n self.assertTrue(result)\n\nif (__name__ == \"__main__\"):\n unittest.main()\n",
"import sys\nimport os\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nimport unittest\nimport numpy as np\nfrom base.SO3 import SO3\nfrom base.utility import test_matrix_equal\ndefault_tol_place = 2\n\n\nclass TestSO3(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_default_constructor(self):\n rot = SO3()\n result = test_matrix_equal(rot.get_matrix(), np.eye(3))\n self.assertTrue(result, 'Default is not identity!')\n\n def test_euler_constructor(self):\n roll, pitch, yaw = 0.1, -0.2, 0.3\n rot = SO3.from_euler(roll, pitch, yaw)\n self.assertAlmostEqual(rot.get_roll(), roll, places=default_tol_place)\n self.assertAlmostEqual(rot.get_pitch(), pitch, places=default_tol_place\n )\n self.assertAlmostEqual(rot.get_yaw(), yaw, places=default_tol_place)\n\n def test_two_directions_constructor_opposite(self):\n d_f = np.array([-0.00941427684, -0.00726582309, 9.7845215], dtype=\n np.float)\n d_t = np.array([0, 0, -9.81], dtype=np.float)\n R = SO3.from_two_directions(d_f, d_t)\n sum_error = 0\n for x1, x2 in zip(d_t, R * d_f):\n sum_error += np.sqrt((x1 - x2) ** 2)\n d_t_recovered = R * d_f\n sin_theta = np.cross(d_t, d_t_recovered) / np.linalg.norm(d_t\n ) / np.linalg.norm(d_t_recovered)\n sin_theta = np.linalg.norm(sin_theta)\n self.assertAlmostEqual(sin_theta, 0, default_tol_place)\n det = np.linalg.det(R.get_matrix())\n self.assertAlmostEqual(det, 1, default_tol_place)\n\n def test_inverse(self):\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n tmp = np.dot(rot.get_matrix(), rot.inverse().get_matrix())\n result = test_matrix_equal(tmp, np.eye(3))\n self.assertTrue(result, 'Inverse not correct!')\n\n def test_exp_ln(self):\n rot = SO3()\n so3 = rot.ln()\n result = test_matrix_equal(so3, np.zeros_like(so3))\n self.assertTrue(result, 'Log not correct!')\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n so3 = rot.ln()\n result = test_matrix_equal(rot.get_matrix(), SO3.exp(so3).get_matrix())\n self.assertTrue(result)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\n<import token>\ndefault_tol_place = 2\n\n\nclass TestSO3(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_default_constructor(self):\n rot = SO3()\n result = test_matrix_equal(rot.get_matrix(), np.eye(3))\n self.assertTrue(result, 'Default is not identity!')\n\n def test_euler_constructor(self):\n roll, pitch, yaw = 0.1, -0.2, 0.3\n rot = SO3.from_euler(roll, pitch, yaw)\n self.assertAlmostEqual(rot.get_roll(), roll, places=default_tol_place)\n self.assertAlmostEqual(rot.get_pitch(), pitch, places=default_tol_place\n )\n self.assertAlmostEqual(rot.get_yaw(), yaw, places=default_tol_place)\n\n def test_two_directions_constructor_opposite(self):\n d_f = np.array([-0.00941427684, -0.00726582309, 9.7845215], dtype=\n np.float)\n d_t = np.array([0, 0, -9.81], dtype=np.float)\n R = SO3.from_two_directions(d_f, d_t)\n sum_error = 0\n for x1, x2 in zip(d_t, R * d_f):\n sum_error += np.sqrt((x1 - x2) ** 2)\n d_t_recovered = R * d_f\n sin_theta = np.cross(d_t, d_t_recovered) / np.linalg.norm(d_t\n ) / np.linalg.norm(d_t_recovered)\n sin_theta = np.linalg.norm(sin_theta)\n self.assertAlmostEqual(sin_theta, 0, default_tol_place)\n det = np.linalg.det(R.get_matrix())\n self.assertAlmostEqual(det, 1, default_tol_place)\n\n def test_inverse(self):\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n tmp = np.dot(rot.get_matrix(), rot.inverse().get_matrix())\n result = test_matrix_equal(tmp, np.eye(3))\n self.assertTrue(result, 'Inverse not correct!')\n\n def test_exp_ln(self):\n rot = SO3()\n so3 = rot.ln()\n result = test_matrix_equal(so3, np.zeros_like(so3))\n self.assertTrue(result, 'Log not correct!')\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n so3 = rot.ln()\n result = test_matrix_equal(rot.get_matrix(), SO3.exp(so3).get_matrix())\n self.assertTrue(result)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\n<import token>\n<assignment token>\n\n\nclass TestSO3(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_default_constructor(self):\n rot = SO3()\n result = test_matrix_equal(rot.get_matrix(), np.eye(3))\n self.assertTrue(result, 'Default is not identity!')\n\n def test_euler_constructor(self):\n roll, pitch, yaw = 0.1, -0.2, 0.3\n rot = SO3.from_euler(roll, pitch, yaw)\n self.assertAlmostEqual(rot.get_roll(), roll, places=default_tol_place)\n self.assertAlmostEqual(rot.get_pitch(), pitch, places=default_tol_place\n )\n self.assertAlmostEqual(rot.get_yaw(), yaw, places=default_tol_place)\n\n def test_two_directions_constructor_opposite(self):\n d_f = np.array([-0.00941427684, -0.00726582309, 9.7845215], dtype=\n np.float)\n d_t = np.array([0, 0, -9.81], dtype=np.float)\n R = SO3.from_two_directions(d_f, d_t)\n sum_error = 0\n for x1, x2 in zip(d_t, R * d_f):\n sum_error += np.sqrt((x1 - x2) ** 2)\n d_t_recovered = R * d_f\n sin_theta = np.cross(d_t, d_t_recovered) / np.linalg.norm(d_t\n ) / np.linalg.norm(d_t_recovered)\n sin_theta = np.linalg.norm(sin_theta)\n self.assertAlmostEqual(sin_theta, 0, default_tol_place)\n det = np.linalg.det(R.get_matrix())\n self.assertAlmostEqual(det, 1, default_tol_place)\n\n def test_inverse(self):\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n tmp = np.dot(rot.get_matrix(), rot.inverse().get_matrix())\n result = test_matrix_equal(tmp, np.eye(3))\n self.assertTrue(result, 'Inverse not correct!')\n\n def test_exp_ln(self):\n rot = SO3()\n so3 = rot.ln()\n result = test_matrix_equal(so3, np.zeros_like(so3))\n self.assertTrue(result, 'Log not correct!')\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n so3 = rot.ln()\n result = test_matrix_equal(rot.get_matrix(), SO3.exp(so3).get_matrix())\n self.assertTrue(result)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass TestSO3(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_default_constructor(self):\n rot = SO3()\n result = test_matrix_equal(rot.get_matrix(), np.eye(3))\n self.assertTrue(result, 'Default is not identity!')\n\n def test_euler_constructor(self):\n roll, pitch, yaw = 0.1, -0.2, 0.3\n rot = SO3.from_euler(roll, pitch, yaw)\n self.assertAlmostEqual(rot.get_roll(), roll, places=default_tol_place)\n self.assertAlmostEqual(rot.get_pitch(), pitch, places=default_tol_place\n )\n self.assertAlmostEqual(rot.get_yaw(), yaw, places=default_tol_place)\n\n def test_two_directions_constructor_opposite(self):\n d_f = np.array([-0.00941427684, -0.00726582309, 9.7845215], dtype=\n np.float)\n d_t = np.array([0, 0, -9.81], dtype=np.float)\n R = SO3.from_two_directions(d_f, d_t)\n sum_error = 0\n for x1, x2 in zip(d_t, R * d_f):\n sum_error += np.sqrt((x1 - x2) ** 2)\n d_t_recovered = R * d_f\n sin_theta = np.cross(d_t, d_t_recovered) / np.linalg.norm(d_t\n ) / np.linalg.norm(d_t_recovered)\n sin_theta = np.linalg.norm(sin_theta)\n self.assertAlmostEqual(sin_theta, 0, default_tol_place)\n det = np.linalg.det(R.get_matrix())\n self.assertAlmostEqual(det, 1, default_tol_place)\n\n def test_inverse(self):\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n tmp = np.dot(rot.get_matrix(), rot.inverse().get_matrix())\n result = test_matrix_equal(tmp, np.eye(3))\n self.assertTrue(result, 'Inverse not correct!')\n\n def test_exp_ln(self):\n rot = SO3()\n so3 = rot.ln()\n result = test_matrix_equal(so3, np.zeros_like(so3))\n self.assertTrue(result, 'Log not correct!')\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n so3 = rot.ln()\n result = test_matrix_equal(rot.get_matrix(), SO3.exp(so3).get_matrix())\n self.assertTrue(result)\n\n\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass TestSO3(unittest.TestCase):\n\n def setUp(self):\n pass\n <function token>\n\n def test_euler_constructor(self):\n roll, pitch, yaw = 0.1, -0.2, 0.3\n rot = SO3.from_euler(roll, pitch, yaw)\n self.assertAlmostEqual(rot.get_roll(), roll, places=default_tol_place)\n self.assertAlmostEqual(rot.get_pitch(), pitch, places=default_tol_place\n )\n self.assertAlmostEqual(rot.get_yaw(), yaw, places=default_tol_place)\n\n def test_two_directions_constructor_opposite(self):\n d_f = np.array([-0.00941427684, -0.00726582309, 9.7845215], dtype=\n np.float)\n d_t = np.array([0, 0, -9.81], dtype=np.float)\n R = SO3.from_two_directions(d_f, d_t)\n sum_error = 0\n for x1, x2 in zip(d_t, R * d_f):\n sum_error += np.sqrt((x1 - x2) ** 2)\n d_t_recovered = R * d_f\n sin_theta = np.cross(d_t, d_t_recovered) / np.linalg.norm(d_t\n ) / np.linalg.norm(d_t_recovered)\n sin_theta = np.linalg.norm(sin_theta)\n self.assertAlmostEqual(sin_theta, 0, default_tol_place)\n det = np.linalg.det(R.get_matrix())\n self.assertAlmostEqual(det, 1, default_tol_place)\n\n def test_inverse(self):\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n tmp = np.dot(rot.get_matrix(), rot.inverse().get_matrix())\n result = test_matrix_equal(tmp, np.eye(3))\n self.assertTrue(result, 'Inverse not correct!')\n\n def test_exp_ln(self):\n rot = SO3()\n so3 = rot.ln()\n result = test_matrix_equal(so3, np.zeros_like(so3))\n self.assertTrue(result, 'Log not correct!')\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n so3 = rot.ln()\n result = test_matrix_equal(rot.get_matrix(), SO3.exp(so3).get_matrix())\n self.assertTrue(result)\n\n\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass TestSO3(unittest.TestCase):\n\n def setUp(self):\n pass\n <function token>\n\n def test_euler_constructor(self):\n roll, pitch, yaw = 0.1, -0.2, 0.3\n rot = SO3.from_euler(roll, pitch, yaw)\n self.assertAlmostEqual(rot.get_roll(), roll, places=default_tol_place)\n self.assertAlmostEqual(rot.get_pitch(), pitch, places=default_tol_place\n )\n self.assertAlmostEqual(rot.get_yaw(), yaw, places=default_tol_place)\n\n def test_two_directions_constructor_opposite(self):\n d_f = np.array([-0.00941427684, -0.00726582309, 9.7845215], dtype=\n np.float)\n d_t = np.array([0, 0, -9.81], dtype=np.float)\n R = SO3.from_two_directions(d_f, d_t)\n sum_error = 0\n for x1, x2 in zip(d_t, R * d_f):\n sum_error += np.sqrt((x1 - x2) ** 2)\n d_t_recovered = R * d_f\n sin_theta = np.cross(d_t, d_t_recovered) / np.linalg.norm(d_t\n ) / np.linalg.norm(d_t_recovered)\n sin_theta = np.linalg.norm(sin_theta)\n self.assertAlmostEqual(sin_theta, 0, default_tol_place)\n det = np.linalg.det(R.get_matrix())\n self.assertAlmostEqual(det, 1, default_tol_place)\n <function token>\n\n def test_exp_ln(self):\n rot = SO3()\n so3 = rot.ln()\n result = test_matrix_equal(so3, np.zeros_like(so3))\n self.assertTrue(result, 'Log not correct!')\n rot = SO3.from_euler(0.1, -0.2, 0.3)\n so3 = rot.ln()\n result = test_matrix_equal(rot.get_matrix(), SO3.exp(so3).get_matrix())\n self.assertTrue(result)\n\n\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass TestSO3(unittest.TestCase):\n\n def setUp(self):\n pass\n <function token>\n\n def test_euler_constructor(self):\n roll, pitch, yaw = 0.1, -0.2, 0.3\n rot = SO3.from_euler(roll, pitch, yaw)\n self.assertAlmostEqual(rot.get_roll(), roll, places=default_tol_place)\n self.assertAlmostEqual(rot.get_pitch(), pitch, places=default_tol_place\n )\n self.assertAlmostEqual(rot.get_yaw(), yaw, places=default_tol_place)\n\n def test_two_directions_constructor_opposite(self):\n d_f = np.array([-0.00941427684, -0.00726582309, 9.7845215], dtype=\n np.float)\n d_t = np.array([0, 0, -9.81], dtype=np.float)\n R = SO3.from_two_directions(d_f, d_t)\n sum_error = 0\n for x1, x2 in zip(d_t, R * d_f):\n sum_error += np.sqrt((x1 - x2) ** 2)\n d_t_recovered = R * d_f\n sin_theta = np.cross(d_t, d_t_recovered) / np.linalg.norm(d_t\n ) / np.linalg.norm(d_t_recovered)\n sin_theta = np.linalg.norm(sin_theta)\n self.assertAlmostEqual(sin_theta, 0, default_tol_place)\n det = np.linalg.det(R.get_matrix())\n self.assertAlmostEqual(det, 1, default_tol_place)\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass TestSO3(unittest.TestCase):\n <function token>\n <function token>\n\n def test_euler_constructor(self):\n roll, pitch, yaw = 0.1, -0.2, 0.3\n rot = SO3.from_euler(roll, pitch, yaw)\n self.assertAlmostEqual(rot.get_roll(), roll, places=default_tol_place)\n self.assertAlmostEqual(rot.get_pitch(), pitch, places=default_tol_place\n )\n self.assertAlmostEqual(rot.get_yaw(), yaw, places=default_tol_place)\n\n def test_two_directions_constructor_opposite(self):\n d_f = np.array([-0.00941427684, -0.00726582309, 9.7845215], dtype=\n np.float)\n d_t = np.array([0, 0, -9.81], dtype=np.float)\n R = SO3.from_two_directions(d_f, d_t)\n sum_error = 0\n for x1, x2 in zip(d_t, R * d_f):\n sum_error += np.sqrt((x1 - x2) ** 2)\n d_t_recovered = R * d_f\n sin_theta = np.cross(d_t, d_t_recovered) / np.linalg.norm(d_t\n ) / np.linalg.norm(d_t_recovered)\n sin_theta = np.linalg.norm(sin_theta)\n self.assertAlmostEqual(sin_theta, 0, default_tol_place)\n det = np.linalg.det(R.get_matrix())\n self.assertAlmostEqual(det, 1, default_tol_place)\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass TestSO3(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n def test_two_directions_constructor_opposite(self):\n d_f = np.array([-0.00941427684, -0.00726582309, 9.7845215], dtype=\n np.float)\n d_t = np.array([0, 0, -9.81], dtype=np.float)\n R = SO3.from_two_directions(d_f, d_t)\n sum_error = 0\n for x1, x2 in zip(d_t, R * d_f):\n sum_error += np.sqrt((x1 - x2) ** 2)\n d_t_recovered = R * d_f\n sin_theta = np.cross(d_t, d_t_recovered) / np.linalg.norm(d_t\n ) / np.linalg.norm(d_t_recovered)\n sin_theta = np.linalg.norm(sin_theta)\n self.assertAlmostEqual(sin_theta, 0, default_tol_place)\n det = np.linalg.det(R.get_matrix())\n self.assertAlmostEqual(det, 1, default_tol_place)\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass TestSO3(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<class token>\n<code token>\n"
] | false |
99,584 |
908fb261ef22954fb1093fc6bf1bb09f816a8d30
|
from SPARQLWrapper import SPARQLWrapper, JSON
from urllib2 import URLError
import os, sys, json, time
endpoints = {'ulan': "http://vocab.getty.edu/sparql",  # renamed from 'map' to avoid shadowing the builtin
             'aac': "http://data.americanartcollaborative.org/sparql"}
files = os.listdir( os.path.join(os.path.dirname(os.path.realpath(__file__)),'sparql'))
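# One .sparql query file per source; its basename selects the endpoint and the output file name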
if not os.path.exists('dataset'):
os.makedirs('dataset')
# Iterate over all SPARQL files
for f in files:
# Extract museum name
base = f[:f.index('.')] # ulan, npg etc.
f_in = open(os.path.join('sparql',f), 'r')
if len(sys.argv) > 1 and base not in sys.argv[1].split():
continue
# Send SPARQL query
    if 'ulan' in base:
        sparql = SPARQLWrapper(endpoints['ulan'])
    else:
        sparql = SPARQLWrapper(endpoints['aac'])
sparql.setQuery(f_in.read())
sparql.setReturnFormat(JSON)
sparql.setTimeout(360)
while True:
print "Downloading ",base," dataset"
try:
results = sparql.query().convert()
break
except URLError:
print("Connection to Sparql server failed! Trying again in five seconds!")
time.sleep(5)
f_in.close()
# Save the results
out = open(os.path.join('dataset',base+'.json'),'w')
for entity in results["results"]["bindings"]:
out.write(json.dumps(entity))
out.write("\n")
out.close()
time.sleep(10)
|
[
"from SPARQLWrapper import SPARQLWrapper, JSON\nfrom urllib2 import URLError\nimport os, sys, json, time\n\nmap = {'ulan':\"http://vocab.getty.edu/sparql\",\n 'aac':\"http://data.americanartcollaborative.org/sparql\"}\n\nfiles = os.listdir( os.path.join(os.path.dirname(os.path.realpath(__file__)),'sparql'))\n\nif not os.path.exists('dataset'):\n os.makedirs('dataset')\n\n# Iterate over all SPARQL files\nfor f in files:\n # Extract museum name\n base = f[:f.index('.')] # ulan, npg etc.\n f_in = open(os.path.join('sparql',f), 'r')\n \n if len(sys.argv) > 1 and base not in sys.argv[1].split():\n continue\n \n # Send SPARQL query\n if 'ulan' in base:\n sparql = SPARQLWrapper(map['ulan'])\n else:\n sparql = SPARQLWrapper(map['aac'])\n \n sparql.setQuery(f_in.read())\n sparql.setReturnFormat(JSON)\n sparql.setTimeout(360)\n while True:\n print \"Downloading \",base,\" dataset\"\n try:\n results = sparql.query().convert()\n break\n except URLError:\n print(\"Connection to Sparql server failed! Trying again in five seconds!\")\n time.sleep(5)\n \n f_in.close()\n \n # Save the results\n out = open(os.path.join('dataset',base+'.json'),'w')\n for entity in results[\"results\"][\"bindings\"]:\n out.write(json.dumps(entity))\n out.write(\"\\n\")\n out.close()\n \n time.sleep(10)"
] | true |
99,585 |
1b16f580c4b118379b070d4e7accb310ddbf50ca
|
import numpy as np
import pandas as pd
import sys
import csv
import matplotlib.pyplot as plt
fname = sys.argv[1]
df = pd.read_csv(sys.argv[1], index_col = None, header = None, delim_whitespace = True, comment = "#")
data = df.values[:, :]  # .values instead of the deprecated as_matrix()
time = data[:, 1]
efield = data[:, 3]
newt = np.linspace(time[0], (time[1] - time[0]) * len(time) * 11, len(time) * 11)
efield = np.concatenate([efield, np.zeros(10 * len(time))])
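# Zero-padding to 11x the original length refines the FFT's frequency grid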
freq = np.fft.fftfreq(len(newt), time[1] - time[0])
fft = np.fft.fft(efield)
df2 = pd.DataFrame(np.array([np.abs(freq), np.abs(fft), np.real(fft), np.imag(fft), (np.abs(fft))**2]).T)
df2.to_csv("fft.txt", index=None, header=None,
sep="\t",
float_format="%20.10E",
quoting=csv.QUOTE_NONE,
#quotechar=' ',
#quotechar='"',
#quotechar="'",
#escapechar=" ",
escapechar=""
)
# Leftover merge-conflict markers resolved: the HEAD side was empty, so keep the calc-divide branch's lines
print("what is going on")
print("calc-divide")
|
[
"import numpy as np\nimport pandas as pd\nimport sys\nimport csv\nimport matplotlib.pyplot as plt\n\nfname = sys.argv[1]\ndf = pd.read_csv(sys.argv[1], index_col = None, header = None, delim_whitespace = True, comment = \"#\")\ndata = df.as_matrix()[:, :]\ntime = data[:, 1]\nefield = data[:, 3]\n\n\nnewt = np.linspace(time[0], (time[1] - time[0]) * len(time) * 11, len(time) * 11)\nefield = np.concatenate([efield, np.zeros(10 * len(time))])\nfreq = np.fft.fftfreq(len(newt), time[1] - time[0])\nfft = np.fft.fft(efield)\n\n\ndf2 = pd.DataFrame(np.array([np.abs(freq), np.abs(fft), np.real(fft), np.imag(fft), (np.abs(fft))**2]).T)\n\ndf2.to_csv(\"fft.txt\", index=None, header=None,\n sep=\"\\t\",\n float_format=\"%20.10E\",\n quoting=csv.QUOTE_NONE,\n #quotechar=' ',\n #quotechar='\"',\n #quotechar=\"'\",\n #escapechar=\" \",\n escapechar=\"\"\n )\n<<<<<<< HEAD\n=======\n\nprint(\"what is going on\")\nprint(\"calc-divide\")\n>>>>>>> calc-divide\n"
] | true |
99,586 |
2ff982a58660473248830662c6b450509290b36b
|
# encoding:utf-8
import sys
__author__ = 'zhaoxiaojun'
reload(sys)
sys.setdefaultencoding('utf-8')
class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
res = 0
count = 0
while num > 0:
flag = num & 1
if flag == 0:
res += pow(2, count)
count += 1
num >>= 1
return res
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
start = 0
str_list = list(s)
for ind in range(len(s)):
if not str(s[ind]).strip() or ind == len(s) - 1:
end = ind - 1
if ind == len(s) - 1:
end = ind
while start < end:
tmp = s[start]
str_list[start] = s[end]
str_list[end] = tmp
start += 1
end -= 1
start = ind + 1
return ''.join(str_list)
def findWords(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
s1 = set('qwertyuiop')
s2 = set('asdfghjkl')
s3 = set('zxcvbnm')
res_list = []
for word in words:
ws = set(str(word).lower())
if ws.issubset(s1) or ws.issubset(s2) or ws.issubset(s3):
res_list.append(word)
return res_list
def calPoints(self, ops):
"""
:type ops: List[str]
:rtype: int
"""
stack = []
total = 0
for ind in range(len(ops)):
if str(ops[ind]).upper() == 'D':
last_valid_score = stack.pop()
current_score = last_valid_score * 2
stack.append(last_valid_score)
stack.append(current_score)
total += current_score
elif str(ops[ind]).upper() == 'C':
invalid_score = stack.pop()
total -= invalid_score
elif str(ops[ind]).upper() == '+':
last_valid_score1 = stack.pop()
last_valid_score2 = stack.pop()
current_score = last_valid_score1 + last_valid_score2
stack.append(last_valid_score2)
stack.append(last_valid_score1)
stack.append(current_score)
total += current_score
else:
total += int(ops[ind])
stack.append(int(ops[ind]))
return total
def distributeCandies(self, candies):
"""
:type candies: List[int]
:rtype: int
"""
sis_set = set()
max_len = len(candies) / 2
for num in candies:
if num not in sis_set and len(sis_set) < max_len:
sis_set.update([num])
return len(sis_set)
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
res_list = []
for ind in range(1, n + 1):
cur_val = ''
if ind % 3 == 0:
cur_val = 'Fizz'
if ind % 5 == 0:
cur_val = '%s%s' % (cur_val, 'Buzz')
if cur_val == '':
cur_val = ind
res_list.append(cur_val)
return res_list
def matrixReshape(self, nums, r, c):
"""
:type nums: List[List[int]]
:type r: int
:type c: int
:rtype: List[List[int]]
"""
flat_list = []
for nu_list in nums:
flat_list += nu_list
if r * c > len(flat_list):
return nums
else:
res_list = []
for count in range(r):
res_list.append(flat_list[c * count:(count + 1) * c])
return res_list
if __name__ == '__main__':
so = Solution()
print so.findComplement(5)
print so.reverseWords("Let's take")
print so.findWords(["Hello", "Alaska", "Dad", "Peace"])
print so.calPoints(["5", "-2", "4", "C", "D", "9", "+", "+"])
print so.distributeCandies([1, 1, 2, 2, 3, 3])
print so.fizzBuzz(15)
print so.matrixReshape([[1, 2], [3, 4]], 1, 4)
|
[
"# encoding:utf-8\nimport sys\n\n__author__ = 'zhaoxiaojun'\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nclass Solution(object):\n def findComplement(self, num):\n \"\"\"\n :type num: int\n :rtype: int\n \"\"\"\n res = 0\n count = 0\n while num > 0:\n flag = num & 1\n if flag == 0:\n res += pow(2, count)\n count += 1\n num >>= 1\n return res\n\n def reverseWords(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n start = 0\n str_list = list(s)\n for ind in range(len(s)):\n if not str(s[ind]).strip() or ind == len(s) - 1:\n end = ind - 1\n if ind == len(s) - 1:\n end = ind\n while start < end:\n tmp = s[start]\n str_list[start] = s[end]\n str_list[end] = tmp\n start += 1\n end -= 1\n start = ind + 1\n return ''.join(str_list)\n\n def findWords(self, words):\n \"\"\"\n :type words: List[str]\n :rtype: List[str]\n \"\"\"\n s1 = set('qwertyuiop')\n s2 = set('asdfghjkl')\n s3 = set('zxcvbnm')\n res_list = []\n for word in words:\n ws = set(str(word).lower())\n if ws.issubset(s1) or ws.issubset(s2) or ws.issubset(s3):\n res_list.append(word)\n return res_list\n\n def calPoints(self, ops):\n \"\"\"\n :type ops: List[str]\n :rtype: int\n \"\"\"\n stack = []\n total = 0\n for ind in range(len(ops)):\n if str(ops[ind]).upper() == 'D':\n last_valid_score = stack.pop()\n current_score = last_valid_score * 2\n stack.append(last_valid_score)\n stack.append(current_score)\n total += current_score\n elif str(ops[ind]).upper() == 'C':\n invalid_score = stack.pop()\n total -= invalid_score\n elif str(ops[ind]).upper() == '+':\n last_valid_score1 = stack.pop()\n last_valid_score2 = stack.pop()\n current_score = last_valid_score1 + last_valid_score2\n stack.append(last_valid_score2)\n stack.append(last_valid_score1)\n stack.append(current_score)\n total += current_score\n else:\n total += int(ops[ind])\n stack.append(int(ops[ind]))\n return total\n\n def distributeCandies(self, candies):\n \"\"\"\n :type candies: List[int]\n :rtype: int\n \"\"\"\n sis_set = set()\n max_len = len(candies) / 2\n for num in candies:\n if num not in sis_set and len(sis_set) < max_len:\n sis_set.update([num])\n return len(sis_set)\n\n def fizzBuzz(self, n):\n \"\"\"\n :type n: int\n :rtype: List[str]\n \"\"\"\n res_list = []\n for ind in range(1, n + 1):\n cur_val = ''\n if ind % 3 == 0:\n cur_val = 'Fizz'\n if ind % 5 == 0:\n cur_val = '%s%s' % (cur_val, 'Buzz')\n if cur_val == '':\n cur_val = ind\n res_list.append(cur_val)\n return res_list\n\n def matrixReshape(self, nums, r, c):\n \"\"\"\n :type nums: List[List[int]]\n :type r: int\n :type c: int\n :rtype: List[List[int]]\n \"\"\"\n flat_list = []\n for nu_list in nums:\n flat_list += nu_list\n if r * c > len(flat_list):\n return nums\n else:\n res_list = []\n for count in range(r):\n res_list.append(flat_list[c * count:(count + 1) * c])\n return res_list\n\n\nif __name__ == '__main__':\n so = Solution()\n print so.findComplement(5)\n print so.reverseWords(\"Let's take\")\n print so.findWords([\"Hello\", \"Alaska\", \"Dad\", \"Peace\"])\n print so.calPoints([\"5\", \"-2\", \"4\", \"C\", \"D\", \"9\", \"+\", \"+\"])\n print so.distributeCandies([1, 1, 2, 2, 3, 3])\n print so.fizzBuzz(15)\n print so.matrixReshape([[1, 2], [3, 4]], 1, 4)\n"
] | true |
99,587 |
8b77411d2fcabd919a2cd03259efb8aa27f271a6
|
import sys
import json
import argparse
from Data.stock_choices import list_of_stocks
from Make_Prediction.ARIMA import ARIMA_implementation
class training_parser():
'''
The job of this class is to take the user's arguments as input and decide what
should happen next
'''
def __init__(self):
#Gets the list of stock names
self.list_of_stocks = list_of_stocks
def create_parser(self):
'''
The goal of this function is to create the parser
'''
self.training_parser = argparse.ArgumentParser()
self.add_arguments()
def add_arguments(self):
'''
The goal of this function is to add all necessary arguments to the parser
'''
#Allow user to choose what stock to train for
self.training_parser.add_argument('--choice')
        #Allow the user to see a list of all available stocks
self.training_parser.add_argument('--list')
#Allow the user to see what stocks already have optimal parameters trained for
self.training_parser.add_argument('--trained')
self.args = self.training_parser.parse_args()
self.check_arguments()
def check_arguments(self):
'''
        The goal of this function is to take the user's input and decide what the
output should be. This is where the logic is held
'''
#Check to see if the stock was an acceptable input
length = len(list_of_stocks)
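        #length acts as a countdown sentinel: it reaches 0 only when no stock matched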
for i in list_of_stocks:
if self.args.choice == i:
self.stock_choice = i
#If the stock was acceptable, break the loop
break
length -= 1
if length == 0 and self.args.choice is not None:
#This only happens when the stock was not a valid name
print("")
print("Enter a valid stock!")
sys.exit(0)
#This will show the list of stocks that are acceptable to train for
if self.args.list == "show":
print("")
print("The list of stocks are: ")
print("")
print(list_of_stocks)
sys.exit(0)
        #This will show the user the stocks that already have optimal variables
if self.args.trained == "show":
            with open(r'C:\Programming\Projects\Current GitHub Project\-MAKE-A-NAME-\Data/best_parameters.json') as file_save:  # raw string keeps the Windows backslashes literal
try:
current_dictionary = json.load(file_save)
                except json.decoder.JSONDecodeError:
current_dictionary = {}
print("")
print("Stocks that are already trainined for are:")
print("")
print('%s'%(current_dictionary))
sys.exit()
try:
#Start the training
self.start_training(self.stock_choice)
except AttributeError:
#This only happens when the user did not provide any arguments
print("")
print("""Use a command:
[--list show] prints off the list of available stocks
                        [--trained show] prints the stocks that already have been trained for
[--choice *STOCK NAME*] allows you to train the model for a particular stock""")
sys.exit(0)
def start_training(self, stock_choice):
'''
The goal of this function is to start the training
'''
        # self.stock_choice is assigned again because the visualize operation can also
        # request training; this lets the training process know which stock to train
        # for without adding additional functions
self.stock_choice = stock_choice
ARIMA = ARIMA_implementation(self.stock_choice)
self.best_order = ARIMA.main()
self.save_order()
def save_order(self):
'''
The goal of this function is to save the optimal (p,d,q) values for future use
'''
#Saves the order to a JSON file
        with open(r'C:\Programming\Projects\Current GitHub Project\-MAKE-A-NAME-\Data/best_parameters.json') as file_save:
try:
current_dictionary = json.load(file_save)
            except json.decoder.JSONDecodeError:
#This gets called if the JSON file is empty
current_dictionary = {}
        with open(r'C:\Programming\Projects\Current GitHub Project\-MAKE-A-NAME-\Data/best_parameters.json', 'w') as file_save:
#Saves the values
current_dictionary[str(self.stock_choice)] = str(self.best_order)
json.dump(current_dictionary, file_save)
def main(self):
'''
The goal of this function is to start the whole process
'''
self.create_parser()
if __name__ == "__main__":
train = training_parser()
train.main()
|
[
"import sys\r\nimport json\r\nimport argparse\r\n\r\nfrom Data.stock_choices import list_of_stocks\r\nfrom Make_Prediction.ARIMA import ARIMA_implementation\r\n\r\nclass training_parser():\r\n '''\r\n The job of this class is to take the user's arguments as input and decide what\r\n should happen next\r\n '''\r\n\r\n def __init__(self):\r\n #Gets the list of stock names\r\n self.list_of_stocks = list_of_stocks\r\n\r\n def create_parser(self):\r\n '''\r\n The goal of this function is to create the parser\r\n '''\r\n self.training_parser = argparse.ArgumentParser()\r\n self.add_arguments()\r\n\r\n def add_arguments(self):\r\n '''\r\n The goal of this function is to add all necessary arguments to the parser\r\n '''\r\n #Allow user to choose what stock to train for\r\n self.training_parser.add_argument('--choice')\r\n\r\n #Allow the user to see a list of all available\r\n self.training_parser.add_argument('--list')\r\n\r\n #Allow the user to see what stocks already have optimal parameters trained for\r\n self.training_parser.add_argument('--trained')\r\n\r\n self.args = self.training_parser.parse_args()\r\n self.check_arguments()\r\n\r\n def check_arguments(self):\r\n '''\r\n The goal of this function is to take the users input and decide what the\r\n output should be. This is where the logic is held\r\n '''\r\n #Check to see if the stock was an acceptable input\r\n length = len(list_of_stocks)\r\n for i in list_of_stocks:\r\n if self.args.choice == i:\r\n self.stock_choice = i\r\n #If the stock was acceptable, break the loop\r\n break\r\n length -= 1\r\n\r\n if length == 0 and self.args.choice is not None:\r\n #This only happens when the stock was not a valid name\r\n print(\"\")\r\n print(\"Enter a valid stock!\")\r\n sys.exit(0)\r\n\r\n #This will show the list of stocks that are acceptable to train for\r\n if self.args.list == \"show\":\r\n print(\"\")\r\n print(\"The list of stocks are: \")\r\n print(\"\")\r\n print(list_of_stocks)\r\n sys.exit(0)\r\n\r\n #This wlil show the user the stocks that already have optimal variabels\r\n if self.args.trained == \"show\":\r\n with open('C:\\Programming\\Projects\\Current GitHub Project\\-MAKE-A-NAME-\\Data/best_parameters.json') as file_save:\r\n try:\r\n current_dictionary = json.load(file_save)\r\n except json.decoderself.JSONDecodeError:\r\n current_dictionary = {}\r\n print(\"\")\r\n print(\"Stocks that are already trainined for are:\")\r\n print(\"\")\r\n print('%s'%(current_dictionary))\r\n sys.exit()\r\n\r\n try:\r\n #Start the training\r\n self.start_training(self.stock_choice)\r\n except AttributeError:\r\n #This only happens when the user did not provide any arguments\r\n print(\"\")\r\n print(\"\"\"Use a command:\r\n [--list show] prints off the list of available stocks\r\n [--trained show] prints the stocks that already have been trainined for\r\n [--choice *STOCK NAME*] allows you to train the model for a particular stock\"\"\")\r\n sys.exit(0)\r\n\r\n def start_training(self, stock_choice):\r\n '''\r\n The goal of this function is to start the training\r\n '''\r\n # Self.stock_choice is repeated because the visualize operation can call to train the stock, this allows the\r\n # Training process to know what stock to train for without adding additional functions\r\n self.stock_choice = stock_choice\r\n ARIMA = ARIMA_implementation(self.stock_choice)\r\n self.best_order = ARIMA.main()\r\n\r\n self.save_order()\r\n\r\n def save_order(self):\r\n '''\r\n The goal of this function is to save the optimal (p,d,q) values for future use\r\n 
'''\r\n #Saves the order to a JSON file\r\n with open('C:\\Programming\\Projects\\Current GitHub Project\\-MAKE-A-NAME-\\Data/best_parameters.json') as file_save:\r\n try:\r\n current_dictionary = json.load(file_save)\r\n except json.decoderself.JSONDecodeError:\r\n #This gets called if the JSON file is empty\r\n current_dictionary = {}\r\n\r\n with open('C:\\Programming\\Projects\\Current GitHub Project\\-MAKE-A-NAME-\\Data/best_parameters.json', 'w') as file_save:\r\n #Saves the values\r\n current_dictionary[str(self.stock_choice)] = str(self.best_order)\r\n json.dump(current_dictionary, file_save)\r\n\r\n def main(self):\r\n '''\r\n The goal of this function is to start the whole process\r\n '''\r\n self.create_parser()\r\n\r\nif __name__ == \"__main__\":\r\n train = training_parser()\r\n train.main()\r\n",
"import sys\nimport json\nimport argparse\nfrom Data.stock_choices import list_of_stocks\nfrom Make_Prediction.ARIMA import ARIMA_implementation\n\n\nclass training_parser:\n \"\"\"\n The job of this class is to take the user's arguments as input and decide what\n should happen next\n \"\"\"\n\n def __init__(self):\n self.list_of_stocks = list_of_stocks\n\n def create_parser(self):\n \"\"\"\n The goal of this function is to create the parser\n \"\"\"\n self.training_parser = argparse.ArgumentParser()\n self.add_arguments()\n\n def add_arguments(self):\n \"\"\"\n The goal of this function is to add all necessary arguments to the parser\n \"\"\"\n self.training_parser.add_argument('--choice')\n self.training_parser.add_argument('--list')\n self.training_parser.add_argument('--trained')\n self.args = self.training_parser.parse_args()\n self.check_arguments()\n\n def check_arguments(self):\n \"\"\"\n The goal of this function is to take the users input and decide what the\n output should be. This is where the logic is held\n \"\"\"\n length = len(list_of_stocks)\n for i in list_of_stocks:\n if self.args.choice == i:\n self.stock_choice = i\n break\n length -= 1\n if length == 0 and self.args.choice is not None:\n print('')\n print('Enter a valid stock!')\n sys.exit(0)\n if self.args.list == 'show':\n print('')\n print('The list of stocks are: ')\n print('')\n print(list_of_stocks)\n sys.exit(0)\n if self.args.trained == 'show':\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n ) as file_save:\n try:\n current_dictionary = json.load(file_save)\n except json.decoderself.JSONDecodeError:\n current_dictionary = {}\n print('')\n print('Stocks that are already trainined for are:')\n print('')\n print('%s' % current_dictionary)\n sys.exit()\n try:\n self.start_training(self.stock_choice)\n except AttributeError:\n print('')\n print(\n \"\"\"Use a command:\n [--list show] prints off the list of available stocks\n [--trained show] prints the stocks that already have been trainined for\n [--choice *STOCK NAME*] allows you to train the model for a particular stock\"\"\"\n )\n sys.exit(0)\n\n def start_training(self, stock_choice):\n \"\"\"\n The goal of this function is to start the training\n \"\"\"\n self.stock_choice = stock_choice\n ARIMA = ARIMA_implementation(self.stock_choice)\n self.best_order = ARIMA.main()\n self.save_order()\n\n def save_order(self):\n \"\"\"\n The goal of this function is to save the optimal (p,d,q) values for future use\n \"\"\"\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n ) as file_save:\n try:\n current_dictionary = json.load(file_save)\n except json.decoderself.JSONDecodeError:\n current_dictionary = {}\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n , 'w') as file_save:\n current_dictionary[str(self.stock_choice)] = str(self.best_order)\n json.dump(current_dictionary, file_save)\n\n def main(self):\n \"\"\"\n The goal of this function is to start the whole process\n \"\"\"\n self.create_parser()\n\n\nif __name__ == '__main__':\n train = training_parser()\n train.main()\n",
"<import token>\n\n\nclass training_parser:\n \"\"\"\n The job of this class is to take the user's arguments as input and decide what\n should happen next\n \"\"\"\n\n def __init__(self):\n self.list_of_stocks = list_of_stocks\n\n def create_parser(self):\n \"\"\"\n The goal of this function is to create the parser\n \"\"\"\n self.training_parser = argparse.ArgumentParser()\n self.add_arguments()\n\n def add_arguments(self):\n \"\"\"\n The goal of this function is to add all necessary arguments to the parser\n \"\"\"\n self.training_parser.add_argument('--choice')\n self.training_parser.add_argument('--list')\n self.training_parser.add_argument('--trained')\n self.args = self.training_parser.parse_args()\n self.check_arguments()\n\n def check_arguments(self):\n \"\"\"\n The goal of this function is to take the users input and decide what the\n output should be. This is where the logic is held\n \"\"\"\n length = len(list_of_stocks)\n for i in list_of_stocks:\n if self.args.choice == i:\n self.stock_choice = i\n break\n length -= 1\n if length == 0 and self.args.choice is not None:\n print('')\n print('Enter a valid stock!')\n sys.exit(0)\n if self.args.list == 'show':\n print('')\n print('The list of stocks are: ')\n print('')\n print(list_of_stocks)\n sys.exit(0)\n if self.args.trained == 'show':\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n ) as file_save:\n try:\n current_dictionary = json.load(file_save)\n except json.decoderself.JSONDecodeError:\n current_dictionary = {}\n print('')\n print('Stocks that are already trainined for are:')\n print('')\n print('%s' % current_dictionary)\n sys.exit()\n try:\n self.start_training(self.stock_choice)\n except AttributeError:\n print('')\n print(\n \"\"\"Use a command:\n [--list show] prints off the list of available stocks\n [--trained show] prints the stocks that already have been trainined for\n [--choice *STOCK NAME*] allows you to train the model for a particular stock\"\"\"\n )\n sys.exit(0)\n\n def start_training(self, stock_choice):\n \"\"\"\n The goal of this function is to start the training\n \"\"\"\n self.stock_choice = stock_choice\n ARIMA = ARIMA_implementation(self.stock_choice)\n self.best_order = ARIMA.main()\n self.save_order()\n\n def save_order(self):\n \"\"\"\n The goal of this function is to save the optimal (p,d,q) values for future use\n \"\"\"\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n ) as file_save:\n try:\n current_dictionary = json.load(file_save)\n except json.decoderself.JSONDecodeError:\n current_dictionary = {}\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n , 'w') as file_save:\n current_dictionary[str(self.stock_choice)] = str(self.best_order)\n json.dump(current_dictionary, file_save)\n\n def main(self):\n \"\"\"\n The goal of this function is to start the whole process\n \"\"\"\n self.create_parser()\n\n\nif __name__ == '__main__':\n train = training_parser()\n train.main()\n",
"<import token>\n\n\nclass training_parser:\n \"\"\"\n The job of this class is to take the user's arguments as input and decide what\n should happen next\n \"\"\"\n\n def __init__(self):\n self.list_of_stocks = list_of_stocks\n\n def create_parser(self):\n \"\"\"\n The goal of this function is to create the parser\n \"\"\"\n self.training_parser = argparse.ArgumentParser()\n self.add_arguments()\n\n def add_arguments(self):\n \"\"\"\n The goal of this function is to add all necessary arguments to the parser\n \"\"\"\n self.training_parser.add_argument('--choice')\n self.training_parser.add_argument('--list')\n self.training_parser.add_argument('--trained')\n self.args = self.training_parser.parse_args()\n self.check_arguments()\n\n def check_arguments(self):\n \"\"\"\n The goal of this function is to take the users input and decide what the\n output should be. This is where the logic is held\n \"\"\"\n length = len(list_of_stocks)\n for i in list_of_stocks:\n if self.args.choice == i:\n self.stock_choice = i\n break\n length -= 1\n if length == 0 and self.args.choice is not None:\n print('')\n print('Enter a valid stock!')\n sys.exit(0)\n if self.args.list == 'show':\n print('')\n print('The list of stocks are: ')\n print('')\n print(list_of_stocks)\n sys.exit(0)\n if self.args.trained == 'show':\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n ) as file_save:\n try:\n current_dictionary = json.load(file_save)\n except json.decoderself.JSONDecodeError:\n current_dictionary = {}\n print('')\n print('Stocks that are already trainined for are:')\n print('')\n print('%s' % current_dictionary)\n sys.exit()\n try:\n self.start_training(self.stock_choice)\n except AttributeError:\n print('')\n print(\n \"\"\"Use a command:\n [--list show] prints off the list of available stocks\n [--trained show] prints the stocks that already have been trainined for\n [--choice *STOCK NAME*] allows you to train the model for a particular stock\"\"\"\n )\n sys.exit(0)\n\n def start_training(self, stock_choice):\n \"\"\"\n The goal of this function is to start the training\n \"\"\"\n self.stock_choice = stock_choice\n ARIMA = ARIMA_implementation(self.stock_choice)\n self.best_order = ARIMA.main()\n self.save_order()\n\n def save_order(self):\n \"\"\"\n The goal of this function is to save the optimal (p,d,q) values for future use\n \"\"\"\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n ) as file_save:\n try:\n current_dictionary = json.load(file_save)\n except json.decoderself.JSONDecodeError:\n current_dictionary = {}\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n , 'w') as file_save:\n current_dictionary[str(self.stock_choice)] = str(self.best_order)\n json.dump(current_dictionary, file_save)\n\n def main(self):\n \"\"\"\n The goal of this function is to start the whole process\n \"\"\"\n self.create_parser()\n\n\n<code token>\n",
"<import token>\n\n\nclass training_parser:\n <docstring token>\n\n def __init__(self):\n self.list_of_stocks = list_of_stocks\n\n def create_parser(self):\n \"\"\"\n The goal of this function is to create the parser\n \"\"\"\n self.training_parser = argparse.ArgumentParser()\n self.add_arguments()\n\n def add_arguments(self):\n \"\"\"\n The goal of this function is to add all necessary arguments to the parser\n \"\"\"\n self.training_parser.add_argument('--choice')\n self.training_parser.add_argument('--list')\n self.training_parser.add_argument('--trained')\n self.args = self.training_parser.parse_args()\n self.check_arguments()\n\n def check_arguments(self):\n \"\"\"\n The goal of this function is to take the users input and decide what the\n output should be. This is where the logic is held\n \"\"\"\n length = len(list_of_stocks)\n for i in list_of_stocks:\n if self.args.choice == i:\n self.stock_choice = i\n break\n length -= 1\n if length == 0 and self.args.choice is not None:\n print('')\n print('Enter a valid stock!')\n sys.exit(0)\n if self.args.list == 'show':\n print('')\n print('The list of stocks are: ')\n print('')\n print(list_of_stocks)\n sys.exit(0)\n if self.args.trained == 'show':\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n ) as file_save:\n try:\n current_dictionary = json.load(file_save)\n except json.decoderself.JSONDecodeError:\n current_dictionary = {}\n print('')\n print('Stocks that are already trainined for are:')\n print('')\n print('%s' % current_dictionary)\n sys.exit()\n try:\n self.start_training(self.stock_choice)\n except AttributeError:\n print('')\n print(\n \"\"\"Use a command:\n [--list show] prints off the list of available stocks\n [--trained show] prints the stocks that already have been trainined for\n [--choice *STOCK NAME*] allows you to train the model for a particular stock\"\"\"\n )\n sys.exit(0)\n\n def start_training(self, stock_choice):\n \"\"\"\n The goal of this function is to start the training\n \"\"\"\n self.stock_choice = stock_choice\n ARIMA = ARIMA_implementation(self.stock_choice)\n self.best_order = ARIMA.main()\n self.save_order()\n\n def save_order(self):\n \"\"\"\n The goal of this function is to save the optimal (p,d,q) values for future use\n \"\"\"\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n ) as file_save:\n try:\n current_dictionary = json.load(file_save)\n except json.decoderself.JSONDecodeError:\n current_dictionary = {}\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n , 'w') as file_save:\n current_dictionary[str(self.stock_choice)] = str(self.best_order)\n json.dump(current_dictionary, file_save)\n\n def main(self):\n \"\"\"\n The goal of this function is to start the whole process\n \"\"\"\n self.create_parser()\n\n\n<code token>\n",
"<import token>\n\n\nclass training_parser:\n <docstring token>\n\n def __init__(self):\n self.list_of_stocks = list_of_stocks\n <function token>\n\n def add_arguments(self):\n \"\"\"\n The goal of this function is to add all necessary arguments to the parser\n \"\"\"\n self.training_parser.add_argument('--choice')\n self.training_parser.add_argument('--list')\n self.training_parser.add_argument('--trained')\n self.args = self.training_parser.parse_args()\n self.check_arguments()\n\n def check_arguments(self):\n \"\"\"\n The goal of this function is to take the users input and decide what the\n output should be. This is where the logic is held\n \"\"\"\n length = len(list_of_stocks)\n for i in list_of_stocks:\n if self.args.choice == i:\n self.stock_choice = i\n break\n length -= 1\n if length == 0 and self.args.choice is not None:\n print('')\n print('Enter a valid stock!')\n sys.exit(0)\n if self.args.list == 'show':\n print('')\n print('The list of stocks are: ')\n print('')\n print(list_of_stocks)\n sys.exit(0)\n if self.args.trained == 'show':\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n ) as file_save:\n try:\n current_dictionary = json.load(file_save)\n except json.decoderself.JSONDecodeError:\n current_dictionary = {}\n print('')\n print('Stocks that are already trainined for are:')\n print('')\n print('%s' % current_dictionary)\n sys.exit()\n try:\n self.start_training(self.stock_choice)\n except AttributeError:\n print('')\n print(\n \"\"\"Use a command:\n [--list show] prints off the list of available stocks\n [--trained show] prints the stocks that already have been trainined for\n [--choice *STOCK NAME*] allows you to train the model for a particular stock\"\"\"\n )\n sys.exit(0)\n\n def start_training(self, stock_choice):\n \"\"\"\n The goal of this function is to start the training\n \"\"\"\n self.stock_choice = stock_choice\n ARIMA = ARIMA_implementation(self.stock_choice)\n self.best_order = ARIMA.main()\n self.save_order()\n\n def save_order(self):\n \"\"\"\n The goal of this function is to save the optimal (p,d,q) values for future use\n \"\"\"\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n ) as file_save:\n try:\n current_dictionary = json.load(file_save)\n except json.decoderself.JSONDecodeError:\n current_dictionary = {}\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n , 'w') as file_save:\n current_dictionary[str(self.stock_choice)] = str(self.best_order)\n json.dump(current_dictionary, file_save)\n\n def main(self):\n \"\"\"\n The goal of this function is to start the whole process\n \"\"\"\n self.create_parser()\n\n\n<code token>\n",
"<import token>\n\n\nclass training_parser:\n <docstring token>\n\n def __init__(self):\n self.list_of_stocks = list_of_stocks\n <function token>\n\n def add_arguments(self):\n \"\"\"\n The goal of this function is to add all necessary arguments to the parser\n \"\"\"\n self.training_parser.add_argument('--choice')\n self.training_parser.add_argument('--list')\n self.training_parser.add_argument('--trained')\n self.args = self.training_parser.parse_args()\n self.check_arguments()\n\n def check_arguments(self):\n \"\"\"\n The goal of this function is to take the users input and decide what the\n output should be. This is where the logic is held\n \"\"\"\n length = len(list_of_stocks)\n for i in list_of_stocks:\n if self.args.choice == i:\n self.stock_choice = i\n break\n length -= 1\n if length == 0 and self.args.choice is not None:\n print('')\n print('Enter a valid stock!')\n sys.exit(0)\n if self.args.list == 'show':\n print('')\n print('The list of stocks are: ')\n print('')\n print(list_of_stocks)\n sys.exit(0)\n if self.args.trained == 'show':\n with open(\n 'C:\\\\Programming\\\\Projects\\\\Current GitHub Project\\\\-MAKE-A-NAME-\\\\Data/best_parameters.json'\n ) as file_save:\n try:\n current_dictionary = json.load(file_save)\n except json.decoderself.JSONDecodeError:\n current_dictionary = {}\n print('')\n print('Stocks that are already trainined for are:')\n print('')\n print('%s' % current_dictionary)\n sys.exit()\n try:\n self.start_training(self.stock_choice)\n except AttributeError:\n print('')\n print(\n \"\"\"Use a command:\n [--list show] prints off the list of available stocks\n [--trained show] prints the stocks that already have been trainined for\n [--choice *STOCK NAME*] allows you to train the model for a particular stock\"\"\"\n )\n sys.exit(0)\n\n def start_training(self, stock_choice):\n \"\"\"\n The goal of this function is to start the training\n \"\"\"\n self.stock_choice = stock_choice\n ARIMA = ARIMA_implementation(self.stock_choice)\n self.best_order = ARIMA.main()\n self.save_order()\n <function token>\n\n def main(self):\n \"\"\"\n The goal of this function is to start the whole process\n \"\"\"\n self.create_parser()\n\n\n<code token>\n",
"<import token>\n\n\nclass training_parser:\n <docstring token>\n\n def __init__(self):\n self.list_of_stocks = list_of_stocks\n <function token>\n\n def add_arguments(self):\n \"\"\"\n The goal of this function is to add all necessary arguments to the parser\n \"\"\"\n self.training_parser.add_argument('--choice')\n self.training_parser.add_argument('--list')\n self.training_parser.add_argument('--trained')\n self.args = self.training_parser.parse_args()\n self.check_arguments()\n <function token>\n\n def start_training(self, stock_choice):\n \"\"\"\n The goal of this function is to start the training\n \"\"\"\n self.stock_choice = stock_choice\n ARIMA = ARIMA_implementation(self.stock_choice)\n self.best_order = ARIMA.main()\n self.save_order()\n <function token>\n\n def main(self):\n \"\"\"\n The goal of this function is to start the whole process\n \"\"\"\n self.create_parser()\n\n\n<code token>\n",
"<import token>\n\n\nclass training_parser:\n <docstring token>\n\n def __init__(self):\n self.list_of_stocks = list_of_stocks\n <function token>\n\n def add_arguments(self):\n \"\"\"\n The goal of this function is to add all necessary arguments to the parser\n \"\"\"\n self.training_parser.add_argument('--choice')\n self.training_parser.add_argument('--list')\n self.training_parser.add_argument('--trained')\n self.args = self.training_parser.parse_args()\n self.check_arguments()\n <function token>\n <function token>\n <function token>\n\n def main(self):\n \"\"\"\n The goal of this function is to start the whole process\n \"\"\"\n self.create_parser()\n\n\n<code token>\n",
"<import token>\n\n\nclass training_parser:\n <docstring token>\n\n def __init__(self):\n self.list_of_stocks = list_of_stocks\n <function token>\n\n def add_arguments(self):\n \"\"\"\n The goal of this function is to add all necessary arguments to the parser\n \"\"\"\n self.training_parser.add_argument('--choice')\n self.training_parser.add_argument('--list')\n self.training_parser.add_argument('--trained')\n self.args = self.training_parser.parse_args()\n self.check_arguments()\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass training_parser:\n <docstring token>\n <function token>\n <function token>\n\n def add_arguments(self):\n \"\"\"\n The goal of this function is to add all necessary arguments to the parser\n \"\"\"\n self.training_parser.add_argument('--choice')\n self.training_parser.add_argument('--list')\n self.training_parser.add_argument('--trained')\n self.args = self.training_parser.parse_args()\n self.check_arguments()\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass training_parser:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
99,588 |
913d3c06947e1b19aa4c4ad8a8886fa73d764835
|
import itertools
from mdp import *
actions = ["up", "down", "left", "right", "do challenge"]
locations = ["loc" + str(i) for i in range(1,10)]
locations_to_map = {}
for l in locations:
locations_to_map[l] = l
locations_to_map["loc1"] = "station_3"
locations_to_map["loc2"] = "home"
locations_to_map["loc5"] = "corridor"
locations_to_map["loc6"] = "recharging_terminal"
locations_to_map["loc7"] = "station_2"
locations_to_map["loc9"] = "station_1"
# 0 represents challenge not done, 1 otherwise
# challenge 1 - 5
init = ("loc2", tuple(list("00000")))
terminals = []
for l in locations:
terminals.append((l, tuple(list("11111"))))
# maps challenge number (1-5) to location?
challenge_locations = {}
challenge_locations[1] = "loc9" #mcts
challenge_locations[2] = "loc7" #reachability
challenge_locations[3] = "loc1" #vision1
challenge_locations[4] = "loc1" #vision2
challenge_locations[5] = "loc1" #vision3
# get all possible challenge states
challenge_states = [tuple(list("00000")), tuple(list("11111"))]
for i in itertools.permutations(list("10000")):
if i not in challenge_states:
challenge_states.append(tuple(list(i)))
for i in itertools.permutations(list("11000")):
if i not in challenge_states:
challenge_states.append(tuple(list(i)))
for i in itertools.permutations(list("11100")):
if i not in challenge_states:
challenge_states.append(tuple(list(i)))
for i in itertools.permutations(list("11110")):
if i not in challenge_states:
challenge_states.append(tuple(list(i)))
all_states = []
for loc in locations:
for c in challenge_states:
all_states.append((loc, c))
# MDP TRANSITIONS
transitions = {}
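# transitions maps each (state, action) key to a {next_state: probability} dict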
prob_succeed_challenge = 0.8
prob_fail_challenge = 1.0 - prob_succeed_challenge
# challenge location transitions
for num, loc in challenge_locations.items():  # .iteritems() is Python 2 only
#print "num", num, "loc", loc
# for every challenge state that has a 0 at index num-1, transition to 1 at index num-1
# i.e. if challenge 1, every state with 0xxxx --> 1xxxx
    # assuming we don't allow the robot to repeat a challenge it has already passed
for s in challenge_states:
if s[num-1] == "0":
current_state = (loc, s)
new_state = list(s)[:]
new_state[num-1] = "1"
new_state = (loc, tuple(new_state))
# state, action, new state, prob
key = (current_state, "do challenge")
if key not in transitions:
transitions[key] = {}
transitions[key][new_state] = prob_succeed_challenge
transitions[key][current_state] = prob_fail_challenge
#print current_state, new_state, "do challenge"
            # i.e. the (current_state, "do challenge") entry keeps current_state with the probability of failing
# moving transitions
prob_action_succeed = 0.8
prob_action_fail = 1.0 - prob_action_succeed
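# The index arithmetic below implies a 3x3 grid of locations
# (loc1-loc3 bottom row, loc7-loc9 top row): up is +3, down is -3,
# right is +1, left is -1; each action is defined only for the cells listed.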
# action up
for i in [1, 4, 5, 3]:
for c in challenge_states:
current_state = ("loc" + str(i), c)
new_state = ("loc" + str(i+3), c)
key = (current_state, "up")
if key not in transitions:
transitions[key] = {}
transitions[key][new_state] = prob_action_succeed
transitions[key][current_state] = prob_action_fail
#print current_state, new_state, "up"
# action down
for i in [7, 8, 4, 6]:
for c in challenge_states:
current_state = ("loc" + str(i), c)
new_state = ("loc" + str(i-3), c)
key = (current_state, "down")
if key not in transitions:
transitions[key] = {}
transitions[key][new_state] = prob_action_succeed
transitions[key][current_state] = prob_action_fail
# action right
for i in [4, 5, 8, 2]:
for c in challenge_states:
current_state = ("loc" + str(i), c)
new_state = ("loc" + str(i+1), c)
key = (current_state, "right")
if key not in transitions:
transitions[key] = {}
transitions[key][new_state] = prob_action_succeed
transitions[key][current_state] = prob_action_fail
# action left
for i in [9, 5, 6, 3]:
for c in challenge_states:
current_state = ("loc" + str(i), c)
new_state = ("loc" + str(i-1), c)
key = (current_state, "left")
if key not in transitions:
transitions[key] = {}
transitions[key][new_state] = prob_action_succeed
transitions[key][current_state] = prob_action_fail
# MDP REWARDS??
rewards={}
for state in all_states:
    # != compares by value; "is not" compares identity, which is unreliable for tuples
    if state != ('loc9', ('1', '1', '1', '1', '1')):
        rewards[state] = 0
rewards[('loc9', ('1', '1', '1', '1', '1'))] = 100
rewards[('loc7', ('1', '1', '1', '1', '1'))] = 100
rewards[('loc1', ('1', '1', '1', '1', '1'))] = 100
#mdp.terminals = ('loc9', ('1', '1', '1'))
#print rewards
#rewards = {('loc9', ('1', '1', '1')): 100}
# {'station_2': 1.0, 'station_3': -0.4, 'station_4': -0.4, 'station_5': -0.4, 'station_7': -0.4, 'station_8': -0.4, 'station_9': -0.4}
gamma = 0.9
mdp = MDP(all_states, actions, init, rewards, transitions, terminals, gamma)
#print len(all_states)
#print all_states
#print actions
#print init
#s1 (('0', '0', '0'), ('1', '0', '0')) p 0.8 s ('loc7', ('0', '0', '0')) a do challenge
|
[
"import itertools\nfrom mdp import *\n\nactions = [\"up\", \"down\", \"left\", \"right\", \"do challenge\"]\nlocations = [\"loc\" + str(i) for i in range(1,10)]\n\nlocations_to_map = {}\nfor l in locations:\n locations_to_map[l] = l\nlocations_to_map[\"loc1\"] = \"station_3\"\nlocations_to_map[\"loc2\"] = \"home\"\nlocations_to_map[\"loc5\"] = \"corridor\"\nlocations_to_map[\"loc6\"] = \"recharging_terminal\"\nlocations_to_map[\"loc7\"] = \"station_2\"\nlocations_to_map[\"loc9\"] = \"station_1\"\n\n# 0 represents challenge not done, 1 otherwise\n# challenge 1 - 5\ninit = (\"loc2\", tuple(list(\"00000\")))\nterminals = []\nfor l in locations:\n terminals.append((l, tuple(list(\"11111\"))))\n\n# maps challenge number (1-5) to location?\nchallenge_locations = {}\nchallenge_locations[1] = \"loc9\" #mcts\nchallenge_locations[2] = \"loc7\" #reachability\nchallenge_locations[3] = \"loc1\" #vision1\nchallenge_locations[4] = \"loc1\" #vision2\nchallenge_locations[5] = \"loc1\" #vision3\n\n# get all possible challenge states\nchallenge_states = [tuple(list(\"00000\")), tuple(list(\"11111\"))]\nfor i in itertools.permutations(list(\"10000\")):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\n\nfor i in itertools.permutations(list(\"11000\")):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\n\nfor i in itertools.permutations(list(\"11100\")):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\n\nfor i in itertools.permutations(list(\"11110\")):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\n\nall_states = []\nfor loc in locations:\n for c in challenge_states:\n all_states.append((loc, c))\n\n# MDP TRANSITIONS\ntransitions = {}\nprob_succeed_challenge = 0.8\nprob_fail_challenge = 1.0 - prob_succeed_challenge\n\n# challenge location transitions\nfor num, loc in challenge_locations.iteritems():\n #print \"num\", num, \"loc\", loc\n # for every challenge state that has a 0 at index num-1, transition to 1 at index num-1\n # i.e. 
if challenge 1, every state with 0xxxx --> 1xxxx\n # assuming we don't allow robot to do the challenge again if it has already succeeded?\n for s in challenge_states:\n if s[num-1] == \"0\":\n current_state = (loc, s)\n new_state = list(s)[:]\n new_state[num-1] = \"1\"\n new_state = (loc, tuple(new_state))\n # state, action, new state, prob\n key = (current_state, \"do challenge\")\n if key not in transitions:\n transitions[key] = {}\n\n transitions[key][new_state] = prob_succeed_challenge\n transitions[key][current_state] = prob_fail_challenge\n #print current_state, new_state, \"do challenge\"\n # print current_state do challenge, current_state, prob of failing challenge\n\n# moving transitions\nprob_action_succeed = 0.8\nprob_action_fail = 1.0 - prob_action_succeed\n\n# action up\nfor i in [1, 4, 5, 3]:\n for c in challenge_states:\n current_state = (\"loc\" + str(i), c)\n new_state = (\"loc\" + str(i+3), c)\n\n key = (current_state, \"up\")\n if key not in transitions:\n transitions[key] = {}\n\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\n #print current_state, new_state, \"up\"\n\n# action down\nfor i in [7, 8, 4, 6]:\n for c in challenge_states:\n current_state = (\"loc\" + str(i), c)\n new_state = (\"loc\" + str(i-3), c)\n\n key = (current_state, \"down\")\n if key not in transitions:\n transitions[key] = {}\n\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\n\n\n# action right\nfor i in [4, 5, 8, 2]:\n for c in challenge_states:\n current_state = (\"loc\" + str(i), c)\n new_state = (\"loc\" + str(i+1), c)\n \n key = (current_state, \"right\")\n if key not in transitions:\n transitions[key] = {}\n \n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\n\n\n# action left\nfor i in [9, 5, 6, 3]:\n for c in challenge_states:\n current_state = (\"loc\" + str(i), c)\n new_state = (\"loc\" + str(i-1), c)\n\n key = (current_state, \"left\")\n if key not in transitions:\n transitions[key] = {}\n\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\n\n\n# MDP REWARDS??\nrewards={}\nfor state in all_states:\n if state is not ('loc9', ('1', '1', '1', '1', '1')):\n if state not in rewards:\n rewards[state] = {}\n rewards[state] = 0\nrewards[('loc9', ('1', '1', '1', '1', '1'))] = 100\nrewards[('loc7', ('1', '1', '1', '1', '1'))] = 100\nrewards[('loc1', ('1', '1', '1', '1', '1'))] = 100\n\n#mdp.terminals = ('loc9', ('1', '1', '1'))\n \n#print rewards\n#rewards = {('loc9', ('1', '1', '1')): 100}\n# {'station_2': 1.0, 'station_3': -0.4, 'station_4': -0.4, 'station_5': -0.4, 'station_7': -0.4, 'station_8': -0.4, 'station_9': -0.4}\n\ngamma = 0.9\nmdp = MDP(all_states, actions, init, rewards, transitions, terminals, gamma)\n\n#print len(all_states)\n#print all_states\n#print actions\n#print init\n#s1 (('0', '0', '0'), ('1', '0', '0')) p 0.8 s ('loc7', ('0', '0', '0')) a do challenge\n\n",
"import itertools\nfrom mdp import *\nactions = ['up', 'down', 'left', 'right', 'do challenge']\nlocations = [('loc' + str(i)) for i in range(1, 10)]\nlocations_to_map = {}\nfor l in locations:\n locations_to_map[l] = l\nlocations_to_map['loc1'] = 'station_3'\nlocations_to_map['loc2'] = 'home'\nlocations_to_map['loc5'] = 'corridor'\nlocations_to_map['loc6'] = 'recharging_terminal'\nlocations_to_map['loc7'] = 'station_2'\nlocations_to_map['loc9'] = 'station_1'\ninit = 'loc2', tuple(list('00000'))\nterminals = []\nfor l in locations:\n terminals.append((l, tuple(list('11111'))))\nchallenge_locations = {}\nchallenge_locations[1] = 'loc9'\nchallenge_locations[2] = 'loc7'\nchallenge_locations[3] = 'loc1'\nchallenge_locations[4] = 'loc1'\nchallenge_locations[5] = 'loc1'\nchallenge_states = [tuple(list('00000')), tuple(list('11111'))]\nfor i in itertools.permutations(list('10000')):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\nfor i in itertools.permutations(list('11000')):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\nfor i in itertools.permutations(list('11100')):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\nfor i in itertools.permutations(list('11110')):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\nall_states = []\nfor loc in locations:\n for c in challenge_states:\n all_states.append((loc, c))\ntransitions = {}\nprob_succeed_challenge = 0.8\nprob_fail_challenge = 1.0 - prob_succeed_challenge\nfor num, loc in challenge_locations.iteritems():\n for s in challenge_states:\n if s[num - 1] == '0':\n current_state = loc, s\n new_state = list(s)[:]\n new_state[num - 1] = '1'\n new_state = loc, tuple(new_state)\n key = current_state, 'do challenge'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_succeed_challenge\n transitions[key][current_state] = prob_fail_challenge\nprob_action_succeed = 0.8\nprob_action_fail = 1.0 - prob_action_succeed\nfor i in [1, 4, 5, 3]:\n for c in challenge_states:\n current_state = 'loc' + str(i), c\n new_state = 'loc' + str(i + 3), c\n key = current_state, 'up'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\nfor i in [7, 8, 4, 6]:\n for c in challenge_states:\n current_state = 'loc' + str(i), c\n new_state = 'loc' + str(i - 3), c\n key = current_state, 'down'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\nfor i in [4, 5, 8, 2]:\n for c in challenge_states:\n current_state = 'loc' + str(i), c\n new_state = 'loc' + str(i + 1), c\n key = current_state, 'right'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\nfor i in [9, 5, 6, 3]:\n for c in challenge_states:\n current_state = 'loc' + str(i), c\n new_state = 'loc' + str(i - 1), c\n key = current_state, 'left'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\nrewards = {}\nfor state in all_states:\n if state is not ('loc9', ('1', '1', '1', '1', '1')):\n if state not in rewards:\n rewards[state] = {}\n rewards[state] = 0\nrewards['loc9', ('1', '1', '1', '1', '1')] = 100\nrewards['loc7', ('1', '1', '1', '1', '1')] = 100\nrewards['loc1', 
('1', '1', '1', '1', '1')] = 100\ngamma = 0.9\nmdp = MDP(all_states, actions, init, rewards, transitions, terminals, gamma)\n",
"<import token>\nactions = ['up', 'down', 'left', 'right', 'do challenge']\nlocations = [('loc' + str(i)) for i in range(1, 10)]\nlocations_to_map = {}\nfor l in locations:\n locations_to_map[l] = l\nlocations_to_map['loc1'] = 'station_3'\nlocations_to_map['loc2'] = 'home'\nlocations_to_map['loc5'] = 'corridor'\nlocations_to_map['loc6'] = 'recharging_terminal'\nlocations_to_map['loc7'] = 'station_2'\nlocations_to_map['loc9'] = 'station_1'\ninit = 'loc2', tuple(list('00000'))\nterminals = []\nfor l in locations:\n terminals.append((l, tuple(list('11111'))))\nchallenge_locations = {}\nchallenge_locations[1] = 'loc9'\nchallenge_locations[2] = 'loc7'\nchallenge_locations[3] = 'loc1'\nchallenge_locations[4] = 'loc1'\nchallenge_locations[5] = 'loc1'\nchallenge_states = [tuple(list('00000')), tuple(list('11111'))]\nfor i in itertools.permutations(list('10000')):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\nfor i in itertools.permutations(list('11000')):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\nfor i in itertools.permutations(list('11100')):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\nfor i in itertools.permutations(list('11110')):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\nall_states = []\nfor loc in locations:\n for c in challenge_states:\n all_states.append((loc, c))\ntransitions = {}\nprob_succeed_challenge = 0.8\nprob_fail_challenge = 1.0 - prob_succeed_challenge\nfor num, loc in challenge_locations.iteritems():\n for s in challenge_states:\n if s[num - 1] == '0':\n current_state = loc, s\n new_state = list(s)[:]\n new_state[num - 1] = '1'\n new_state = loc, tuple(new_state)\n key = current_state, 'do challenge'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_succeed_challenge\n transitions[key][current_state] = prob_fail_challenge\nprob_action_succeed = 0.8\nprob_action_fail = 1.0 - prob_action_succeed\nfor i in [1, 4, 5, 3]:\n for c in challenge_states:\n current_state = 'loc' + str(i), c\n new_state = 'loc' + str(i + 3), c\n key = current_state, 'up'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\nfor i in [7, 8, 4, 6]:\n for c in challenge_states:\n current_state = 'loc' + str(i), c\n new_state = 'loc' + str(i - 3), c\n key = current_state, 'down'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\nfor i in [4, 5, 8, 2]:\n for c in challenge_states:\n current_state = 'loc' + str(i), c\n new_state = 'loc' + str(i + 1), c\n key = current_state, 'right'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\nfor i in [9, 5, 6, 3]:\n for c in challenge_states:\n current_state = 'loc' + str(i), c\n new_state = 'loc' + str(i - 1), c\n key = current_state, 'left'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\nrewards = {}\nfor state in all_states:\n if state is not ('loc9', ('1', '1', '1', '1', '1')):\n if state not in rewards:\n rewards[state] = {}\n rewards[state] = 0\nrewards['loc9', ('1', '1', '1', '1', '1')] = 100\nrewards['loc7', ('1', '1', '1', '1', '1')] = 100\nrewards['loc1', ('1', '1', '1', '1', 
'1')] = 100\ngamma = 0.9\nmdp = MDP(all_states, actions, init, rewards, transitions, terminals, gamma)\n",
"<import token>\n<assignment token>\nfor l in locations:\n locations_to_map[l] = l\n<assignment token>\nfor l in locations:\n terminals.append((l, tuple(list('11111'))))\n<assignment token>\nfor i in itertools.permutations(list('10000')):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\nfor i in itertools.permutations(list('11000')):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\nfor i in itertools.permutations(list('11100')):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\nfor i in itertools.permutations(list('11110')):\n if i not in challenge_states:\n challenge_states.append(tuple(list(i)))\n<assignment token>\nfor loc in locations:\n for c in challenge_states:\n all_states.append((loc, c))\n<assignment token>\nfor num, loc in challenge_locations.iteritems():\n for s in challenge_states:\n if s[num - 1] == '0':\n current_state = loc, s\n new_state = list(s)[:]\n new_state[num - 1] = '1'\n new_state = loc, tuple(new_state)\n key = current_state, 'do challenge'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_succeed_challenge\n transitions[key][current_state] = prob_fail_challenge\n<assignment token>\nfor i in [1, 4, 5, 3]:\n for c in challenge_states:\n current_state = 'loc' + str(i), c\n new_state = 'loc' + str(i + 3), c\n key = current_state, 'up'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\nfor i in [7, 8, 4, 6]:\n for c in challenge_states:\n current_state = 'loc' + str(i), c\n new_state = 'loc' + str(i - 3), c\n key = current_state, 'down'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\nfor i in [4, 5, 8, 2]:\n for c in challenge_states:\n current_state = 'loc' + str(i), c\n new_state = 'loc' + str(i + 1), c\n key = current_state, 'right'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\nfor i in [9, 5, 6, 3]:\n for c in challenge_states:\n current_state = 'loc' + str(i), c\n new_state = 'loc' + str(i - 1), c\n key = current_state, 'left'\n if key not in transitions:\n transitions[key] = {}\n transitions[key][new_state] = prob_action_succeed\n transitions[key][current_state] = prob_action_fail\n<assignment token>\nfor state in all_states:\n if state is not ('loc9', ('1', '1', '1', '1', '1')):\n if state not in rewards:\n rewards[state] = {}\n rewards[state] = 0\n<assignment token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n"
] | false |
99,589 |
7193ad57a1f261fa45d456117e70c3efa66210be
|
from collections import OrderedDict
from datetime import datetime
import cv2 as cv
import numpy as np
from scipy.spatial import distance
class CentroidAlgorithm:
def __init__(self, maxNumberOfDisapper=30):
self.nextObject = 0
self.objects = OrderedDict()
self.disappeared = OrderedDict()
self.maxNumberOfDisapper = maxNumberOfDisapper
def __del__(self):
del self.objects
del self.disappeared
print("all objects deregisterd")
def register(self, centroidValues):
self.registerEvent(centroidValues, None)
def registerEvent(self, centroidValues, EntryTime):
print("object registered at ",self.nextObject, ": ", centroidValues)
self.objects[self.nextObject] = {"centroid":centroidValues, "entrytime": EntryTime, "isUpdated": False}
self.disappeared[self.nextObject] = 0
self.nextObject += 1
def deregister(self, objectID):
print("object deregistered at ", objectID,": ", self.objects[objectID])
del self.objects[objectID]
del self.disappeared[objectID]
def update(self, rectange, EntryTime):
        # if no boxes were detected this frame, increment every tracked object's
        # disappearance count and deregister any that exceed the threshold
if len(rectange) == 0:
# get every object
for objectId in list(self.objects.keys()):
self.disappeared[objectId] += 1
                # check against the maximum allowed disappearances;
                # if exceeded, deregister that object
if self.disappeared[objectId] > self.maxNumberOfDisapper:
self.deregister(objectId)
# return the objects
return self.objects
        # otherwise, compute a centroid for each given rectangle so it can be
        # matched against the previous frame's centroids;
        # currentCentroids holds one (x, y) pair per rectangle
currentCentroids = np.zeros((len(rectange), 2), dtype="int")
        # get the coordinates of each box; OpenCV detection boxes are (x, y, w, h)
        for (i, (startX, startY, boxW, boxH)) in enumerate(rectange):
            # calculate the centroid of the box
            X_centroid = int((2 * startX + boxW) / 2.0)
            Y_centroid = int((2 * startY + boxH) / 2.0)
# push into currentCentroids for further use
currentCentroids[i] = (X_centroid, Y_centroid)
        # initial condition: nothing is tracked yet, so register every
        # detected centroid as a new object
if len(self.objects) == 0:
for i in range(0, len(currentCentroids)):
self.register(currentCentroids[i])
if self.objects[self.nextObject-1]["isUpdated"] == False:
self.objects[self.nextObject-1]["entrytime"] = EntryTime
self.objects[self.nextObject-1]["isUpdated"] = True
        # otherwise compute the Euclidean distance from each tracked centroid to each current centroid
else:
objectIDs = list(self.objects.keys())
objectsValue = list(self.objects.values())
# print(objectsValue)
objectCentroids = [list(value["centroid"]) for value in objectsValue]
# print(objectCentroids)
            # Euclidean distance from every previous-frame centroid to every current-frame centroid
eculideanDistance = distance.cdist(np.array(objectCentroids), currentCentroids)
            # eculideanDistance has shape (len(objectCentroids), len(currentCentroids)):
            # row i holds the distances from tracked object i to every
            # current centroid, so axis=1 scans across the current centroids
            # first, order the rows by their smallest distance
rows = eculideanDistance.min(axis=1).argsort()
            # then take the matching column index for each of those rows
cols = eculideanDistance.argmin(axis=1)[rows]
            # keep track of which rows and columns have been examined
            usedRows = set()  # sets avoid duplicates
usedCols = set()
for (row, col) in zip(rows, cols):
                # skip pairs that were already examined
if row in usedRows or col in usedCols:
continue
# else update the centroid
                objectID = objectIDs[row]  # rows index the tracked objects (first cdist argument)
                self.objects[objectID]["centroid"] = currentCentroids[col]  # cols index the current centroids
self.disappeared[objectID] = 0
# update this row and column to indicate we examined
usedRows.add(row)
usedCols.add(col)
            # there may be some unused rows and columns left over
unusedRows = set(range(0, eculideanDistance.shape[0])).difference(usedRows)
unusedCols = set(range(0, eculideanDistance.shape[1])).difference(usedCols)
            # if there are at least as many tracked objects as current
            # centroids, some tracked objects may have disappeared
if len(np.array(objectCentroids)) >= len(currentCentroids):
# check in unused rows
for row in unusedRows:
                    # get the objectId and increment its disappearance count
objectID = objectIDs[row]
self.disappeared[objectID] += 1
                    # deregister the object once it passes the maximum
if self.disappeared[objectID] > self.maxNumberOfDisapper:
self.deregister(objectID)
            # if there are more current centroids than tracked objects,
            # new objects have arrived; register them
else:
for col in unusedCols:
self.register(currentCentroids[col])
if self.objects[self.nextObject - 1]["isUpdated"] == False:
self.objects[self.nextObject-1]["entrytime"] = EntryTime
self.objects[self.nextObject-1]["isUpdated"] = True
return self.objects
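

# The matching step above is just scipy's cdist plus a greedy argmin pass.
# A minimal standalone sketch of the idea (a hypothetical demo, defined but
# never called; prev/curr are illustrative names, not part of the tracker):
def _demo_centroid_matching():
    prev = np.array([[10, 10], [100, 100]])          # last frame's centroids
    curr = np.array([[12, 11], [300, 5], [98, 99]])  # this frame's centroids
    d = distance.cdist(prev, curr)      # (2, 3) distance matrix
    rows = d.min(axis=1).argsort()      # rows ordered by their best match
    cols = d.argmin(axis=1)[rows]       # matching column per row
    # greedy pairing: prev[0] -> curr[0] and prev[1] -> curr[2];
    # curr[1] is left unmatched and would be registered as a new object
    return list(zip(rows, cols))
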
class EventCapture:
def __init__(self):
self.starttimer = None
def startTimer(self):
self.starttimer = datetime.now()
def event(self):
return (datetime.now() - self.starttimer)
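

# Usage sketch (hypothetical timing, not part of the script): EventCapture
# wraps datetime arithmetic, so event() returns a datetime.timedelta.
#   t = EventCapture(); t.startTimer()
#   ...
#   elapsed = t.event()
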
# Set the model
model = "../ssd_mobilenet_v3_large_coco_2020_01_14/frozen_inference_graph.pb"
config = "../config_file/ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt"
DL_model = cv.dnn_DetectionModel(model, config)
# Set the labels
labels = []
with open("../labels.txt") as labelFile:
labels = labelFile.read().split("\n")
labels.pop()  # drop the trailing empty entry left by the final newline
DL_model.setInputSize(320, 320)
DL_model.setInputScale(1.0/127.0)
DL_model.setInputMean((127.5, 127.5, 127.5))
DL_model.setInputSwapRB(True)
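# setInputScale/setInputMean normalize pixels roughly into [-1, 1]
# ((pixel - 127.5) * (1 / 127.0)), and setInputSwapRB converts OpenCV's
# BGR frames to the RGB channel order the TensorFlow model expects.
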
def resizeScaleFrame(frame, scale=0.25):
height = frame.shape[0]
width = frame.shape[1]
scale_dimenstion = (int(width * scale), int(height * scale))
model_dimenstion = (320, 320)
scale_img = cv.resize(frame, scale_dimenstion, interpolation=cv.INTER_CUBIC)
model_img = cv.resize(frame, model_dimenstion, interpolation=cv.INTER_CUBIC)
return [scale_img, model_img]
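
# resizeScaleFrame is only referenced by the commented-out preview code in
# the main loop below; a hypothetical call would look like:
#   preview, model_input = resizeScaleFrame(frame)  # 25% preview + 320x320 model-sized frame
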
# capture the video
capture_video = cv.VideoCapture(0)
timer = EventCapture()
timer.startTimer()
centroidAlgo = CentroidAlgorithm()
def detectTheObject(frame):
class_indexes, confidence_levels, border_boxes = DL_model.detect(frame)
rectange = []
if len(class_indexes) > 0:
for class_index, confidence, border_box in zip(class_indexes.flatten(), confidence_levels.flatten(), border_boxes):
                # keep only persons (class 1 in this label map) and draw a border for them
if confidence > 0.61:
if class_index != 1:
continue
print("{}, {}, {}".format(class_indexes[:,0], border_boxes[:], confidence_levels[:]))
rectange.append(border_box.astype("int"))
cv.rectangle(frame, border_box, (255,0, 0), 2)
cv.putText(frame, labels[class_index - 1], (border_box[0], border_box[1]), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color=(0,255,255),thickness=1)
objects = centroidAlgo.update(rectange, EntryTime=timer.event())
for (ObjectID, info) in objects.items():
# print(info["entrytime"])
text = "ID {}, st {}, T {}".format(ObjectID, timer.starttimer.strftime("%S.%f"), str(info["entrytime"]).split(":")[2])
cv.putText(frame, text, (info["centroid"][0], info["centroid"][1]), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color=(0,255,255),thickness=1)
cv.circle(frame,(info["centroid"][0], info["centroid"][1]), 4, (255,0,0), -1)
return frame
while True:
isTrue, frame = capture_video.read()
if not isTrue:
print("Error!! unable to read the frame")
break
#resize the frame and show as video
# new_frame = resizeScaleFrame(frame)
# scaled_frame = detectTheObject(new_frame[0])
# cv.imshow("test", new_frame[0])
mapped_frame = detectTheObject(frame)
# cv.imshow("scaled_video", scaled_frame)
cv.imshow("mapped_video", mapped_frame)
if cv.waitKey(20) & 0xFF == ord("s"):
break
capture_video.release()
cv.destroyAllWindows()
cv.waitKey(0)
|
[
"from collections import OrderedDict\nfrom datetime import datetime\nimport cv2 as cv\nimport numpy as np\nfrom scipy.spatial import distance\n\nclass CentroidAlgorithm:\n def __init__(self, maxNumberOfDisapper=30):\n self.nextObject = 0\n self.objects = OrderedDict()\n self.disappeared = OrderedDict()\n self.maxNumberOfDisapper = maxNumberOfDisapper\n \n def __del__(self):\n del self.objects\n del self.disappeared\n print(\"all objects deregisterd\")\n \n def register(self, centroidValues):\n self.registerEvent(centroidValues, None) \n\n def registerEvent(self, centroidValues, EntryTime):\n print(\"object registered at \",self.nextObject, \": \", centroidValues)\n self.objects[self.nextObject] = {\"centroid\":centroidValues, \"entrytime\": EntryTime, \"isUpdated\": False}\n\n self.disappeared[self.nextObject] = 0\n self.nextObject += 1\n \n\n def deregister(self, objectID):\n print(\"object deregistered at \", objectID,\": \", self.objects[objectID])\n del self.objects[objectID]\n del self.disappeared[objectID]\n\n def update(self, rectange, EntryTime):\n # check if length of box is 0 if this then count number of occurence\n # this disappearence and count disappearence to deregister the object\n if len(rectange) == 0:\n # get every object \n for objectId in list(self.objects.keys()):\n self.disappeared[objectId] += 1\n # check agains the maxdisappearence of the objects\n # if so deregister that object\n if self.disappeared[objectId] > self.maxNumberOfDisapper:\n self.deregister(objectId)\n \n # return the objects\n return self.objects\n\n\n # if not that case get the rectange calculate the distance from previes \n # first store the number of centroids in CurrentCentroids\n # make its shape of rectange\n currentCentroids = np.zeros((len(rectange), 2), dtype=\"int\")\n # get coordinates of the box\n for (i, (startX, startY, height, width)) in enumerate(rectange):\n # calculate centroid of the box frame\n X_centroid = int((2 * startX + height) / 2.0)\n Y_centroid = int((2 * startY + width) / 2.0)\n # push into currentCentroids for further use\n currentCentroids[i] = (X_centroid, Y_centroid)\n\n # initial condition check againts the updated centroid array \n # register the objects upto how many box we have\n if len(self.objects) == 0:\n for i in range(0, len(currentCentroids)):\n self.register(currentCentroids[i])\n if self.objects[self.nextObject-1][\"isUpdated\"] == False:\n self.objects[self.nextObject-1][\"entrytime\"] = EntryTime\n self.objects[self.nextObject-1][\"isUpdated\"] = True\n \n # if not get the eculidean distance between the previous centroid of frame from objects[objectID] to currentCentroids\n else:\n objectIDs = list(self.objects.keys())\n objectsValue = list(self.objects.values())\n # print(objectsValue)\n objectCentroids = [list(value[\"centroid\"]) for value in objectsValue]\n # print(objectCentroids)\n # find eculidean distance between previous frame centroid to current frame centroid\n eculideanDistance = distance.cdist(np.array(objectCentroids), currentCentroids)\n # get the minimum distance between two centeroids\n # now eculideanDistance is of the size len(currentCentroids) X len(currentCentroids)\n # so in this every ith row is first input's ith array\n # and column represents second input's ith array\n # axis=1 for rowise check\n\n # first find overall minimum\n rows = eculideanDistance.min(axis=1).argsort()\n # since we get row we want to find column to get perticular index\n cols = eculideanDistance.argmin(axis=1)[rows]\n\n # keep tack of which of the column we 
examined\n usedRows = set() #as set doesn't duplicate\n usedCols = set()\n\n for (row, col) in zip(rows, cols):\n # if alredy examined do nothing\n if row in usedRows or col in usedCols:\n continue\n \n # else update the centroid\n objectID = objectIDs[row] # since objectCentroids is first row argument\n self.objects[objectID][\"centroid\"] = currentCentroids[col] # since currentCentroids is column row argument\n self.disappeared[objectID] = 0\n \n # update this row and column to indicate we examined\n usedRows.add(row)\n usedCols.add(col)\n \n # there are may be some unused rows and columns\n unusedRows = set(range(0, eculideanDistance.shape[0])).difference(usedRows)\n unusedCols = set(range(0, eculideanDistance.shape[1])).difference(usedCols)\n\n # check the number of object centroid and current centroid\n # if it is greater than or equal to current centroid \n # we need to check and see some of the object disapperead\n if len(np.array(objectCentroids)) >= len(currentCentroids):\n # check in unused rows \n for row in unusedRows:\n # get the objectId and increment the disappreance\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n\n # check for maximum to deregsiter the object\n if self.disappeared[objectID] > self.maxNumberOfDisapper:\n self.deregister(objectID)\n\n # if object centroid is less than currentcentroid then new object has \n # arrieved register the object\n else:\n for col in unusedCols:\n self.register(currentCentroids[col])\n if self.objects[self.nextObject - 1][\"isUpdated\"] == False:\n self.objects[self.nextObject-1][\"entrytime\"] = EntryTime \n self.objects[self.nextObject-1][\"isUpdated\"] = True\n \n return self.objects \n \nclass EventCapture:\n def __init__(self):\n self.starttimer = None\n \n def startTimer(self):\n self.starttimer = datetime.now()\n \n \n def event(self):\n return (datetime.now() - self.starttimer)\n\n\n# Set the model\nmodel = \"../ssd_mobilenet_v3_large_coco_2020_01_14/frozen_inference_graph.pb\"\nconfig = \"../config_file/ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt\"\nDL_model = cv.dnn_DetectionModel(model, config)\n\n# Set the labels\nlabels = []\nwith open(\"../labels.txt\") as labelFile:\n labels = labelFile.read().split(\"\\n\")\nlabels.pop()\n\nDL_model.setInputSize(320, 320)\nDL_model.setInputScale(1.0/127.0)\nDL_model.setInputMean((127.5, 127.5, 127.5))\nDL_model.setInputSwapRB(True)\n\ndef resizeScaleFrame(frame, scale=0.25):\n height = frame.shape[0]\n width = frame.shape[1]\n \n scale_dimenstion = (int(width * scale), int(height * scale))\n model_dimenstion = (320, 320)\n scale_img = cv.resize(frame, scale_dimenstion, interpolation=cv.INTER_CUBIC)\n model_img = cv.resize(frame, model_dimenstion, interpolation=cv.INTER_CUBIC)\n return [scale_img, model_img]\n\n\n\n# capture the video\ncapture_video = cv.VideoCapture(0)\ntimer = EventCapture()\ntimer.startTimer()\ncentroidAlgo = CentroidAlgorithm()\n\ndef detectTheObject(frame):\n class_indexes, confidence_levels, border_boxes = DL_model.detect(frame)\n rectange = []\n if len(class_indexes) > 0:\n for class_index, confidence, border_box in zip(class_indexes.flatten(), confidence_levels.flatten(), border_boxes):\n \n #check only persons and make border for them\n if confidence > 0.61:\n if class_index != 1:\n continue\n print(\"{}, {}, {}\".format(class_indexes[:,0], border_boxes[:], confidence_levels[:]))\n rectange.append(border_box.astype(\"int\"))\n cv.rectangle(frame, border_box, (255,0, 0), 2)\n cv.putText(frame, labels[class_index - 1], (border_box[0], 
border_box[1]), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color=(0,255,255),thickness=1)\n\n \n objects = centroidAlgo.update(rectange, EntryTime=timer.event())\n for (ObjectID, info) in objects.items():\n # print(info[\"entrytime\"])\n text = \"ID {}, st {}, T {}\".format(ObjectID, timer.starttimer.strftime(\"%S.%f\"), str(info[\"entrytime\"]).split(\":\")[2])\n cv.putText(frame, text, (info[\"centroid\"][0], info[\"centroid\"][1]), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color=(0,255,255),thickness=1)\n cv.circle(frame,(info[\"centroid\"][0], info[\"centroid\"][1]), 4, (255,0,0), -1)\n return frame\n\nwhile True:\n isTrue, frame = capture_video.read()\n\n if not isTrue:\n print(\"Error!! unable to read the frame\")\n break\n \n #resize the frame and show as video\n # new_frame = resizeScaleFrame(frame)\n # scaled_frame = detectTheObject(new_frame[0])\n # cv.imshow(\"test\", new_frame[0])\n mapped_frame = detectTheObject(frame)\n # cv.imshow(\"scaled_video\", scaled_frame)\n cv.imshow(\"mapped_video\", mapped_frame)\n\n if cv.waitKey(20) & 0xFF == ord(\"s\"):\n break\n\ncapture_video.release()\ncv.destroyAllWindows()\n\ncv.waitKey(0)",
"from collections import OrderedDict\nfrom datetime import datetime\nimport cv2 as cv\nimport numpy as np\nfrom scipy.spatial import distance\n\n\nclass CentroidAlgorithm:\n\n def __init__(self, maxNumberOfDisapper=30):\n self.nextObject = 0\n self.objects = OrderedDict()\n self.disappeared = OrderedDict()\n self.maxNumberOfDisapper = maxNumberOfDisapper\n\n def __del__(self):\n del self.objects\n del self.disappeared\n print('all objects deregisterd')\n\n def register(self, centroidValues):\n self.registerEvent(centroidValues, None)\n\n def registerEvent(self, centroidValues, EntryTime):\n print('object registered at ', self.nextObject, ': ', centroidValues)\n self.objects[self.nextObject] = {'centroid': centroidValues,\n 'entrytime': EntryTime, 'isUpdated': False}\n self.disappeared[self.nextObject] = 0\n self.nextObject += 1\n\n def deregister(self, objectID):\n print('object deregistered at ', objectID, ': ', self.objects[objectID]\n )\n del self.objects[objectID]\n del self.disappeared[objectID]\n\n def update(self, rectange, EntryTime):\n if len(rectange) == 0:\n for objectId in list(self.objects.keys()):\n self.disappeared[objectId] += 1\n if self.disappeared[objectId] > self.maxNumberOfDisapper:\n self.deregister(objectId)\n return self.objects\n currentCentroids = np.zeros((len(rectange), 2), dtype='int')\n for i, (startX, startY, height, width) in enumerate(rectange):\n X_centroid = int((2 * startX + height) / 2.0)\n Y_centroid = int((2 * startY + width) / 2.0)\n currentCentroids[i] = X_centroid, Y_centroid\n if len(self.objects) == 0:\n for i in range(0, len(currentCentroids)):\n self.register(currentCentroids[i])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n else:\n objectIDs = list(self.objects.keys())\n objectsValue = list(self.objects.values())\n objectCentroids = [list(value['centroid']) for value in\n objectsValue]\n eculideanDistance = distance.cdist(np.array(objectCentroids),\n currentCentroids)\n rows = eculideanDistance.min(axis=1).argsort()\n cols = eculideanDistance.argmin(axis=1)[rows]\n usedRows = set()\n usedCols = set()\n for row, col in zip(rows, cols):\n if row in usedRows or col in usedCols:\n continue\n objectID = objectIDs[row]\n self.objects[objectID]['centroid'] = currentCentroids[col]\n self.disappeared[objectID] = 0\n usedRows.add(row)\n usedCols.add(col)\n unusedRows = set(range(0, eculideanDistance.shape[0])).difference(\n usedRows)\n unusedCols = set(range(0, eculideanDistance.shape[1])).difference(\n usedCols)\n if len(np.array(objectCentroids)) >= len(currentCentroids):\n for row in unusedRows:\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n if self.disappeared[objectID] > self.maxNumberOfDisapper:\n self.deregister(objectID)\n else:\n for col in unusedCols:\n self.register(currentCentroids[col])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'\n ] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n return self.objects\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\nmodel = '../ssd_mobilenet_v3_large_coco_2020_01_14/frozen_inference_graph.pb'\nconfig = '../config_file/ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'\nDL_model = cv.dnn_DetectionModel(model, config)\nlabels = 
[]\nwith open('../labels.txt') as labelFile:\n labels = labelFile.read().split('\\n')\nlabels.pop()\nDL_model.setInputSize(320, 320)\nDL_model.setInputScale(1.0 / 127.0)\nDL_model.setInputMean((127.5, 127.5, 127.5))\nDL_model.setInputSwapRB(True)\n\n\ndef resizeScaleFrame(frame, scale=0.25):\n height = frame.shape[0]\n width = frame.shape[1]\n scale_dimenstion = int(width * scale), int(height * scale)\n model_dimenstion = 320, 320\n scale_img = cv.resize(frame, scale_dimenstion, interpolation=cv.INTER_CUBIC\n )\n model_img = cv.resize(frame, model_dimenstion, interpolation=cv.INTER_CUBIC\n )\n return [scale_img, model_img]\n\n\ncapture_video = cv.VideoCapture(0)\ntimer = EventCapture()\ntimer.startTimer()\ncentroidAlgo = CentroidAlgorithm()\n\n\ndef detectTheObject(frame):\n class_indexes, confidence_levels, border_boxes = DL_model.detect(frame)\n rectange = []\n if len(class_indexes) > 0:\n for class_index, confidence, border_box in zip(class_indexes.\n flatten(), confidence_levels.flatten(), border_boxes):\n if confidence > 0.61:\n if class_index != 1:\n continue\n print('{}, {}, {}'.format(class_indexes[:, 0], border_boxes\n [:], confidence_levels[:]))\n rectange.append(border_box.astype('int'))\n cv.rectangle(frame, border_box, (255, 0, 0), 2)\n cv.putText(frame, labels[class_index - 1], (border_box[0],\n border_box[1]), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color\n =(0, 255, 255), thickness=1)\n objects = centroidAlgo.update(rectange, EntryTime=timer.event())\n for ObjectID, info in objects.items():\n text = 'ID {}, st {}, T {}'.format(ObjectID, timer.starttimer.\n strftime('%S.%f'), str(info['entrytime']).split(':')[2])\n cv.putText(frame, text, (info['centroid'][0], info['centroid'][\n 1]), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color=(0, 255, 255),\n thickness=1)\n cv.circle(frame, (info['centroid'][0], info['centroid'][1]), 4,\n (255, 0, 0), -1)\n return frame\n\n\nwhile True:\n isTrue, frame = capture_video.read()\n if not isTrue:\n print('Error!! unable to read the frame')\n break\n mapped_frame = detectTheObject(frame)\n cv.imshow('mapped_video', mapped_frame)\n if cv.waitKey(20) & 255 == ord('s'):\n break\ncapture_video.release()\ncv.destroyAllWindows()\ncv.waitKey(0)\n",
"<import token>\n\n\nclass CentroidAlgorithm:\n\n def __init__(self, maxNumberOfDisapper=30):\n self.nextObject = 0\n self.objects = OrderedDict()\n self.disappeared = OrderedDict()\n self.maxNumberOfDisapper = maxNumberOfDisapper\n\n def __del__(self):\n del self.objects\n del self.disappeared\n print('all objects deregisterd')\n\n def register(self, centroidValues):\n self.registerEvent(centroidValues, None)\n\n def registerEvent(self, centroidValues, EntryTime):\n print('object registered at ', self.nextObject, ': ', centroidValues)\n self.objects[self.nextObject] = {'centroid': centroidValues,\n 'entrytime': EntryTime, 'isUpdated': False}\n self.disappeared[self.nextObject] = 0\n self.nextObject += 1\n\n def deregister(self, objectID):\n print('object deregistered at ', objectID, ': ', self.objects[objectID]\n )\n del self.objects[objectID]\n del self.disappeared[objectID]\n\n def update(self, rectange, EntryTime):\n if len(rectange) == 0:\n for objectId in list(self.objects.keys()):\n self.disappeared[objectId] += 1\n if self.disappeared[objectId] > self.maxNumberOfDisapper:\n self.deregister(objectId)\n return self.objects\n currentCentroids = np.zeros((len(rectange), 2), dtype='int')\n for i, (startX, startY, height, width) in enumerate(rectange):\n X_centroid = int((2 * startX + height) / 2.0)\n Y_centroid = int((2 * startY + width) / 2.0)\n currentCentroids[i] = X_centroid, Y_centroid\n if len(self.objects) == 0:\n for i in range(0, len(currentCentroids)):\n self.register(currentCentroids[i])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n else:\n objectIDs = list(self.objects.keys())\n objectsValue = list(self.objects.values())\n objectCentroids = [list(value['centroid']) for value in\n objectsValue]\n eculideanDistance = distance.cdist(np.array(objectCentroids),\n currentCentroids)\n rows = eculideanDistance.min(axis=1).argsort()\n cols = eculideanDistance.argmin(axis=1)[rows]\n usedRows = set()\n usedCols = set()\n for row, col in zip(rows, cols):\n if row in usedRows or col in usedCols:\n continue\n objectID = objectIDs[row]\n self.objects[objectID]['centroid'] = currentCentroids[col]\n self.disappeared[objectID] = 0\n usedRows.add(row)\n usedCols.add(col)\n unusedRows = set(range(0, eculideanDistance.shape[0])).difference(\n usedRows)\n unusedCols = set(range(0, eculideanDistance.shape[1])).difference(\n usedCols)\n if len(np.array(objectCentroids)) >= len(currentCentroids):\n for row in unusedRows:\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n if self.disappeared[objectID] > self.maxNumberOfDisapper:\n self.deregister(objectID)\n else:\n for col in unusedCols:\n self.register(currentCentroids[col])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'\n ] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n return self.objects\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\nmodel = '../ssd_mobilenet_v3_large_coco_2020_01_14/frozen_inference_graph.pb'\nconfig = '../config_file/ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'\nDL_model = cv.dnn_DetectionModel(model, config)\nlabels = []\nwith open('../labels.txt') as labelFile:\n labels = 
labelFile.read().split('\\n')\nlabels.pop()\nDL_model.setInputSize(320, 320)\nDL_model.setInputScale(1.0 / 127.0)\nDL_model.setInputMean((127.5, 127.5, 127.5))\nDL_model.setInputSwapRB(True)\n\n\ndef resizeScaleFrame(frame, scale=0.25):\n height = frame.shape[0]\n width = frame.shape[1]\n scale_dimenstion = int(width * scale), int(height * scale)\n model_dimenstion = 320, 320\n scale_img = cv.resize(frame, scale_dimenstion, interpolation=cv.INTER_CUBIC\n )\n model_img = cv.resize(frame, model_dimenstion, interpolation=cv.INTER_CUBIC\n )\n return [scale_img, model_img]\n\n\ncapture_video = cv.VideoCapture(0)\ntimer = EventCapture()\ntimer.startTimer()\ncentroidAlgo = CentroidAlgorithm()\n\n\ndef detectTheObject(frame):\n class_indexes, confidence_levels, border_boxes = DL_model.detect(frame)\n rectange = []\n if len(class_indexes) > 0:\n for class_index, confidence, border_box in zip(class_indexes.\n flatten(), confidence_levels.flatten(), border_boxes):\n if confidence > 0.61:\n if class_index != 1:\n continue\n print('{}, {}, {}'.format(class_indexes[:, 0], border_boxes\n [:], confidence_levels[:]))\n rectange.append(border_box.astype('int'))\n cv.rectangle(frame, border_box, (255, 0, 0), 2)\n cv.putText(frame, labels[class_index - 1], (border_box[0],\n border_box[1]), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color\n =(0, 255, 255), thickness=1)\n objects = centroidAlgo.update(rectange, EntryTime=timer.event())\n for ObjectID, info in objects.items():\n text = 'ID {}, st {}, T {}'.format(ObjectID, timer.starttimer.\n strftime('%S.%f'), str(info['entrytime']).split(':')[2])\n cv.putText(frame, text, (info['centroid'][0], info['centroid'][\n 1]), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color=(0, 255, 255),\n thickness=1)\n cv.circle(frame, (info['centroid'][0], info['centroid'][1]), 4,\n (255, 0, 0), -1)\n return frame\n\n\nwhile True:\n isTrue, frame = capture_video.read()\n if not isTrue:\n print('Error!! unable to read the frame')\n break\n mapped_frame = detectTheObject(frame)\n cv.imshow('mapped_video', mapped_frame)\n if cv.waitKey(20) & 255 == ord('s'):\n break\ncapture_video.release()\ncv.destroyAllWindows()\ncv.waitKey(0)\n",
"<import token>\n\n\nclass CentroidAlgorithm:\n\n def __init__(self, maxNumberOfDisapper=30):\n self.nextObject = 0\n self.objects = OrderedDict()\n self.disappeared = OrderedDict()\n self.maxNumberOfDisapper = maxNumberOfDisapper\n\n def __del__(self):\n del self.objects\n del self.disappeared\n print('all objects deregisterd')\n\n def register(self, centroidValues):\n self.registerEvent(centroidValues, None)\n\n def registerEvent(self, centroidValues, EntryTime):\n print('object registered at ', self.nextObject, ': ', centroidValues)\n self.objects[self.nextObject] = {'centroid': centroidValues,\n 'entrytime': EntryTime, 'isUpdated': False}\n self.disappeared[self.nextObject] = 0\n self.nextObject += 1\n\n def deregister(self, objectID):\n print('object deregistered at ', objectID, ': ', self.objects[objectID]\n )\n del self.objects[objectID]\n del self.disappeared[objectID]\n\n def update(self, rectange, EntryTime):\n if len(rectange) == 0:\n for objectId in list(self.objects.keys()):\n self.disappeared[objectId] += 1\n if self.disappeared[objectId] > self.maxNumberOfDisapper:\n self.deregister(objectId)\n return self.objects\n currentCentroids = np.zeros((len(rectange), 2), dtype='int')\n for i, (startX, startY, height, width) in enumerate(rectange):\n X_centroid = int((2 * startX + height) / 2.0)\n Y_centroid = int((2 * startY + width) / 2.0)\n currentCentroids[i] = X_centroid, Y_centroid\n if len(self.objects) == 0:\n for i in range(0, len(currentCentroids)):\n self.register(currentCentroids[i])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n else:\n objectIDs = list(self.objects.keys())\n objectsValue = list(self.objects.values())\n objectCentroids = [list(value['centroid']) for value in\n objectsValue]\n eculideanDistance = distance.cdist(np.array(objectCentroids),\n currentCentroids)\n rows = eculideanDistance.min(axis=1).argsort()\n cols = eculideanDistance.argmin(axis=1)[rows]\n usedRows = set()\n usedCols = set()\n for row, col in zip(rows, cols):\n if row in usedRows or col in usedCols:\n continue\n objectID = objectIDs[row]\n self.objects[objectID]['centroid'] = currentCentroids[col]\n self.disappeared[objectID] = 0\n usedRows.add(row)\n usedCols.add(col)\n unusedRows = set(range(0, eculideanDistance.shape[0])).difference(\n usedRows)\n unusedCols = set(range(0, eculideanDistance.shape[1])).difference(\n usedCols)\n if len(np.array(objectCentroids)) >= len(currentCentroids):\n for row in unusedRows:\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n if self.disappeared[objectID] > self.maxNumberOfDisapper:\n self.deregister(objectID)\n else:\n for col in unusedCols:\n self.register(currentCentroids[col])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'\n ] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n return self.objects\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\n<assignment token>\nwith open('../labels.txt') as labelFile:\n labels = labelFile.read().split('\\n')\nlabels.pop()\nDL_model.setInputSize(320, 320)\nDL_model.setInputScale(1.0 / 127.0)\nDL_model.setInputMean((127.5, 127.5, 127.5))\nDL_model.setInputSwapRB(True)\n\n\ndef resizeScaleFrame(frame, scale=0.25):\n height = 
frame.shape[0]\n width = frame.shape[1]\n scale_dimenstion = int(width * scale), int(height * scale)\n model_dimenstion = 320, 320\n scale_img = cv.resize(frame, scale_dimenstion, interpolation=cv.INTER_CUBIC\n )\n model_img = cv.resize(frame, model_dimenstion, interpolation=cv.INTER_CUBIC\n )\n return [scale_img, model_img]\n\n\n<assignment token>\ntimer.startTimer()\n<assignment token>\n\n\ndef detectTheObject(frame):\n class_indexes, confidence_levels, border_boxes = DL_model.detect(frame)\n rectange = []\n if len(class_indexes) > 0:\n for class_index, confidence, border_box in zip(class_indexes.\n flatten(), confidence_levels.flatten(), border_boxes):\n if confidence > 0.61:\n if class_index != 1:\n continue\n print('{}, {}, {}'.format(class_indexes[:, 0], border_boxes\n [:], confidence_levels[:]))\n rectange.append(border_box.astype('int'))\n cv.rectangle(frame, border_box, (255, 0, 0), 2)\n cv.putText(frame, labels[class_index - 1], (border_box[0],\n border_box[1]), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color\n =(0, 255, 255), thickness=1)\n objects = centroidAlgo.update(rectange, EntryTime=timer.event())\n for ObjectID, info in objects.items():\n text = 'ID {}, st {}, T {}'.format(ObjectID, timer.starttimer.\n strftime('%S.%f'), str(info['entrytime']).split(':')[2])\n cv.putText(frame, text, (info['centroid'][0], info['centroid'][\n 1]), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color=(0, 255, 255),\n thickness=1)\n cv.circle(frame, (info['centroid'][0], info['centroid'][1]), 4,\n (255, 0, 0), -1)\n return frame\n\n\nwhile True:\n isTrue, frame = capture_video.read()\n if not isTrue:\n print('Error!! unable to read the frame')\n break\n mapped_frame = detectTheObject(frame)\n cv.imshow('mapped_video', mapped_frame)\n if cv.waitKey(20) & 255 == ord('s'):\n break\ncapture_video.release()\ncv.destroyAllWindows()\ncv.waitKey(0)\n",
"<import token>\n\n\nclass CentroidAlgorithm:\n\n def __init__(self, maxNumberOfDisapper=30):\n self.nextObject = 0\n self.objects = OrderedDict()\n self.disappeared = OrderedDict()\n self.maxNumberOfDisapper = maxNumberOfDisapper\n\n def __del__(self):\n del self.objects\n del self.disappeared\n print('all objects deregisterd')\n\n def register(self, centroidValues):\n self.registerEvent(centroidValues, None)\n\n def registerEvent(self, centroidValues, EntryTime):\n print('object registered at ', self.nextObject, ': ', centroidValues)\n self.objects[self.nextObject] = {'centroid': centroidValues,\n 'entrytime': EntryTime, 'isUpdated': False}\n self.disappeared[self.nextObject] = 0\n self.nextObject += 1\n\n def deregister(self, objectID):\n print('object deregistered at ', objectID, ': ', self.objects[objectID]\n )\n del self.objects[objectID]\n del self.disappeared[objectID]\n\n def update(self, rectange, EntryTime):\n if len(rectange) == 0:\n for objectId in list(self.objects.keys()):\n self.disappeared[objectId] += 1\n if self.disappeared[objectId] > self.maxNumberOfDisapper:\n self.deregister(objectId)\n return self.objects\n currentCentroids = np.zeros((len(rectange), 2), dtype='int')\n for i, (startX, startY, height, width) in enumerate(rectange):\n X_centroid = int((2 * startX + height) / 2.0)\n Y_centroid = int((2 * startY + width) / 2.0)\n currentCentroids[i] = X_centroid, Y_centroid\n if len(self.objects) == 0:\n for i in range(0, len(currentCentroids)):\n self.register(currentCentroids[i])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n else:\n objectIDs = list(self.objects.keys())\n objectsValue = list(self.objects.values())\n objectCentroids = [list(value['centroid']) for value in\n objectsValue]\n eculideanDistance = distance.cdist(np.array(objectCentroids),\n currentCentroids)\n rows = eculideanDistance.min(axis=1).argsort()\n cols = eculideanDistance.argmin(axis=1)[rows]\n usedRows = set()\n usedCols = set()\n for row, col in zip(rows, cols):\n if row in usedRows or col in usedCols:\n continue\n objectID = objectIDs[row]\n self.objects[objectID]['centroid'] = currentCentroids[col]\n self.disappeared[objectID] = 0\n usedRows.add(row)\n usedCols.add(col)\n unusedRows = set(range(0, eculideanDistance.shape[0])).difference(\n usedRows)\n unusedCols = set(range(0, eculideanDistance.shape[1])).difference(\n usedCols)\n if len(np.array(objectCentroids)) >= len(currentCentroids):\n for row in unusedRows:\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n if self.disappeared[objectID] > self.maxNumberOfDisapper:\n self.deregister(objectID)\n else:\n for col in unusedCols:\n self.register(currentCentroids[col])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'\n ] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n return self.objects\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\n<assignment token>\n<code token>\n\n\ndef resizeScaleFrame(frame, scale=0.25):\n height = frame.shape[0]\n width = frame.shape[1]\n scale_dimenstion = int(width * scale), int(height * scale)\n model_dimenstion = 320, 320\n scale_img = cv.resize(frame, scale_dimenstion, interpolation=cv.INTER_CUBIC\n )\n model_img = cv.resize(frame, 
model_dimenstion, interpolation=cv.INTER_CUBIC\n )\n return [scale_img, model_img]\n\n\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef detectTheObject(frame):\n class_indexes, confidence_levels, border_boxes = DL_model.detect(frame)\n rectange = []\n if len(class_indexes) > 0:\n for class_index, confidence, border_box in zip(class_indexes.\n flatten(), confidence_levels.flatten(), border_boxes):\n if confidence > 0.61:\n if class_index != 1:\n continue\n print('{}, {}, {}'.format(class_indexes[:, 0], border_boxes\n [:], confidence_levels[:]))\n rectange.append(border_box.astype('int'))\n cv.rectangle(frame, border_box, (255, 0, 0), 2)\n cv.putText(frame, labels[class_index - 1], (border_box[0],\n border_box[1]), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color\n =(0, 255, 255), thickness=1)\n objects = centroidAlgo.update(rectange, EntryTime=timer.event())\n for ObjectID, info in objects.items():\n text = 'ID {}, st {}, T {}'.format(ObjectID, timer.starttimer.\n strftime('%S.%f'), str(info['entrytime']).split(':')[2])\n cv.putText(frame, text, (info['centroid'][0], info['centroid'][\n 1]), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, color=(0, 255, 255),\n thickness=1)\n cv.circle(frame, (info['centroid'][0], info['centroid'][1]), 4,\n (255, 0, 0), -1)\n return frame\n\n\n<code token>\n",
"<import token>\n\n\nclass CentroidAlgorithm:\n\n def __init__(self, maxNumberOfDisapper=30):\n self.nextObject = 0\n self.objects = OrderedDict()\n self.disappeared = OrderedDict()\n self.maxNumberOfDisapper = maxNumberOfDisapper\n\n def __del__(self):\n del self.objects\n del self.disappeared\n print('all objects deregisterd')\n\n def register(self, centroidValues):\n self.registerEvent(centroidValues, None)\n\n def registerEvent(self, centroidValues, EntryTime):\n print('object registered at ', self.nextObject, ': ', centroidValues)\n self.objects[self.nextObject] = {'centroid': centroidValues,\n 'entrytime': EntryTime, 'isUpdated': False}\n self.disappeared[self.nextObject] = 0\n self.nextObject += 1\n\n def deregister(self, objectID):\n print('object deregistered at ', objectID, ': ', self.objects[objectID]\n )\n del self.objects[objectID]\n del self.disappeared[objectID]\n\n def update(self, rectange, EntryTime):\n if len(rectange) == 0:\n for objectId in list(self.objects.keys()):\n self.disappeared[objectId] += 1\n if self.disappeared[objectId] > self.maxNumberOfDisapper:\n self.deregister(objectId)\n return self.objects\n currentCentroids = np.zeros((len(rectange), 2), dtype='int')\n for i, (startX, startY, height, width) in enumerate(rectange):\n X_centroid = int((2 * startX + height) / 2.0)\n Y_centroid = int((2 * startY + width) / 2.0)\n currentCentroids[i] = X_centroid, Y_centroid\n if len(self.objects) == 0:\n for i in range(0, len(currentCentroids)):\n self.register(currentCentroids[i])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n else:\n objectIDs = list(self.objects.keys())\n objectsValue = list(self.objects.values())\n objectCentroids = [list(value['centroid']) for value in\n objectsValue]\n eculideanDistance = distance.cdist(np.array(objectCentroids),\n currentCentroids)\n rows = eculideanDistance.min(axis=1).argsort()\n cols = eculideanDistance.argmin(axis=1)[rows]\n usedRows = set()\n usedCols = set()\n for row, col in zip(rows, cols):\n if row in usedRows or col in usedCols:\n continue\n objectID = objectIDs[row]\n self.objects[objectID]['centroid'] = currentCentroids[col]\n self.disappeared[objectID] = 0\n usedRows.add(row)\n usedCols.add(col)\n unusedRows = set(range(0, eculideanDistance.shape[0])).difference(\n usedRows)\n unusedCols = set(range(0, eculideanDistance.shape[1])).difference(\n usedCols)\n if len(np.array(objectCentroids)) >= len(currentCentroids):\n for row in unusedRows:\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n if self.disappeared[objectID] > self.maxNumberOfDisapper:\n self.deregister(objectID)\n else:\n for col in unusedCols:\n self.register(currentCentroids[col])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'\n ] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n return self.objects\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\n<assignment token>\n<code token>\n\n\ndef resizeScaleFrame(frame, scale=0.25):\n height = frame.shape[0]\n width = frame.shape[1]\n scale_dimenstion = int(width * scale), int(height * scale)\n model_dimenstion = 320, 320\n scale_img = cv.resize(frame, scale_dimenstion, interpolation=cv.INTER_CUBIC\n )\n model_img = cv.resize(frame, 
model_dimenstion, interpolation=cv.INTER_CUBIC\n )\n return [scale_img, model_img]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass CentroidAlgorithm:\n\n def __init__(self, maxNumberOfDisapper=30):\n self.nextObject = 0\n self.objects = OrderedDict()\n self.disappeared = OrderedDict()\n self.maxNumberOfDisapper = maxNumberOfDisapper\n\n def __del__(self):\n del self.objects\n del self.disappeared\n print('all objects deregisterd')\n\n def register(self, centroidValues):\n self.registerEvent(centroidValues, None)\n\n def registerEvent(self, centroidValues, EntryTime):\n print('object registered at ', self.nextObject, ': ', centroidValues)\n self.objects[self.nextObject] = {'centroid': centroidValues,\n 'entrytime': EntryTime, 'isUpdated': False}\n self.disappeared[self.nextObject] = 0\n self.nextObject += 1\n\n def deregister(self, objectID):\n print('object deregistered at ', objectID, ': ', self.objects[objectID]\n )\n del self.objects[objectID]\n del self.disappeared[objectID]\n\n def update(self, rectange, EntryTime):\n if len(rectange) == 0:\n for objectId in list(self.objects.keys()):\n self.disappeared[objectId] += 1\n if self.disappeared[objectId] > self.maxNumberOfDisapper:\n self.deregister(objectId)\n return self.objects\n currentCentroids = np.zeros((len(rectange), 2), dtype='int')\n for i, (startX, startY, height, width) in enumerate(rectange):\n X_centroid = int((2 * startX + height) / 2.0)\n Y_centroid = int((2 * startY + width) / 2.0)\n currentCentroids[i] = X_centroid, Y_centroid\n if len(self.objects) == 0:\n for i in range(0, len(currentCentroids)):\n self.register(currentCentroids[i])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n else:\n objectIDs = list(self.objects.keys())\n objectsValue = list(self.objects.values())\n objectCentroids = [list(value['centroid']) for value in\n objectsValue]\n eculideanDistance = distance.cdist(np.array(objectCentroids),\n currentCentroids)\n rows = eculideanDistance.min(axis=1).argsort()\n cols = eculideanDistance.argmin(axis=1)[rows]\n usedRows = set()\n usedCols = set()\n for row, col in zip(rows, cols):\n if row in usedRows or col in usedCols:\n continue\n objectID = objectIDs[row]\n self.objects[objectID]['centroid'] = currentCentroids[col]\n self.disappeared[objectID] = 0\n usedRows.add(row)\n usedCols.add(col)\n unusedRows = set(range(0, eculideanDistance.shape[0])).difference(\n usedRows)\n unusedCols = set(range(0, eculideanDistance.shape[1])).difference(\n usedCols)\n if len(np.array(objectCentroids)) >= len(currentCentroids):\n for row in unusedRows:\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n if self.disappeared[objectID] > self.maxNumberOfDisapper:\n self.deregister(objectID)\n else:\n for col in unusedCols:\n self.register(currentCentroids[col])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'\n ] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n return self.objects\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass CentroidAlgorithm:\n\n def __init__(self, maxNumberOfDisapper=30):\n self.nextObject = 0\n self.objects = OrderedDict()\n self.disappeared = OrderedDict()\n self.maxNumberOfDisapper = maxNumberOfDisapper\n\n def __del__(self):\n del self.objects\n del self.disappeared\n print('all objects deregisterd')\n <function token>\n\n def registerEvent(self, centroidValues, EntryTime):\n print('object registered at ', self.nextObject, ': ', centroidValues)\n self.objects[self.nextObject] = {'centroid': centroidValues,\n 'entrytime': EntryTime, 'isUpdated': False}\n self.disappeared[self.nextObject] = 0\n self.nextObject += 1\n\n def deregister(self, objectID):\n print('object deregistered at ', objectID, ': ', self.objects[objectID]\n )\n del self.objects[objectID]\n del self.disappeared[objectID]\n\n def update(self, rectange, EntryTime):\n if len(rectange) == 0:\n for objectId in list(self.objects.keys()):\n self.disappeared[objectId] += 1\n if self.disappeared[objectId] > self.maxNumberOfDisapper:\n self.deregister(objectId)\n return self.objects\n currentCentroids = np.zeros((len(rectange), 2), dtype='int')\n for i, (startX, startY, height, width) in enumerate(rectange):\n X_centroid = int((2 * startX + height) / 2.0)\n Y_centroid = int((2 * startY + width) / 2.0)\n currentCentroids[i] = X_centroid, Y_centroid\n if len(self.objects) == 0:\n for i in range(0, len(currentCentroids)):\n self.register(currentCentroids[i])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n else:\n objectIDs = list(self.objects.keys())\n objectsValue = list(self.objects.values())\n objectCentroids = [list(value['centroid']) for value in\n objectsValue]\n eculideanDistance = distance.cdist(np.array(objectCentroids),\n currentCentroids)\n rows = eculideanDistance.min(axis=1).argsort()\n cols = eculideanDistance.argmin(axis=1)[rows]\n usedRows = set()\n usedCols = set()\n for row, col in zip(rows, cols):\n if row in usedRows or col in usedCols:\n continue\n objectID = objectIDs[row]\n self.objects[objectID]['centroid'] = currentCentroids[col]\n self.disappeared[objectID] = 0\n usedRows.add(row)\n usedCols.add(col)\n unusedRows = set(range(0, eculideanDistance.shape[0])).difference(\n usedRows)\n unusedCols = set(range(0, eculideanDistance.shape[1])).difference(\n usedCols)\n if len(np.array(objectCentroids)) >= len(currentCentroids):\n for row in unusedRows:\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n if self.disappeared[objectID] > self.maxNumberOfDisapper:\n self.deregister(objectID)\n else:\n for col in unusedCols:\n self.register(currentCentroids[col])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'\n ] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n return self.objects\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass CentroidAlgorithm:\n\n def __init__(self, maxNumberOfDisapper=30):\n self.nextObject = 0\n self.objects = OrderedDict()\n self.disappeared = OrderedDict()\n self.maxNumberOfDisapper = maxNumberOfDisapper\n <function token>\n <function token>\n\n def registerEvent(self, centroidValues, EntryTime):\n print('object registered at ', self.nextObject, ': ', centroidValues)\n self.objects[self.nextObject] = {'centroid': centroidValues,\n 'entrytime': EntryTime, 'isUpdated': False}\n self.disappeared[self.nextObject] = 0\n self.nextObject += 1\n\n def deregister(self, objectID):\n print('object deregistered at ', objectID, ': ', self.objects[objectID]\n )\n del self.objects[objectID]\n del self.disappeared[objectID]\n\n def update(self, rectange, EntryTime):\n if len(rectange) == 0:\n for objectId in list(self.objects.keys()):\n self.disappeared[objectId] += 1\n if self.disappeared[objectId] > self.maxNumberOfDisapper:\n self.deregister(objectId)\n return self.objects\n currentCentroids = np.zeros((len(rectange), 2), dtype='int')\n for i, (startX, startY, height, width) in enumerate(rectange):\n X_centroid = int((2 * startX + height) / 2.0)\n Y_centroid = int((2 * startY + width) / 2.0)\n currentCentroids[i] = X_centroid, Y_centroid\n if len(self.objects) == 0:\n for i in range(0, len(currentCentroids)):\n self.register(currentCentroids[i])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n else:\n objectIDs = list(self.objects.keys())\n objectsValue = list(self.objects.values())\n objectCentroids = [list(value['centroid']) for value in\n objectsValue]\n eculideanDistance = distance.cdist(np.array(objectCentroids),\n currentCentroids)\n rows = eculideanDistance.min(axis=1).argsort()\n cols = eculideanDistance.argmin(axis=1)[rows]\n usedRows = set()\n usedCols = set()\n for row, col in zip(rows, cols):\n if row in usedRows or col in usedCols:\n continue\n objectID = objectIDs[row]\n self.objects[objectID]['centroid'] = currentCentroids[col]\n self.disappeared[objectID] = 0\n usedRows.add(row)\n usedCols.add(col)\n unusedRows = set(range(0, eculideanDistance.shape[0])).difference(\n usedRows)\n unusedCols = set(range(0, eculideanDistance.shape[1])).difference(\n usedCols)\n if len(np.array(objectCentroids)) >= len(currentCentroids):\n for row in unusedRows:\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n if self.disappeared[objectID] > self.maxNumberOfDisapper:\n self.deregister(objectID)\n else:\n for col in unusedCols:\n self.register(currentCentroids[col])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'\n ] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n return self.objects\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass CentroidAlgorithm:\n\n def __init__(self, maxNumberOfDisapper=30):\n self.nextObject = 0\n self.objects = OrderedDict()\n self.disappeared = OrderedDict()\n self.maxNumberOfDisapper = maxNumberOfDisapper\n <function token>\n <function token>\n <function token>\n\n def deregister(self, objectID):\n print('object deregistered at ', objectID, ': ', self.objects[objectID]\n )\n del self.objects[objectID]\n del self.disappeared[objectID]\n\n def update(self, rectange, EntryTime):\n if len(rectange) == 0:\n for objectId in list(self.objects.keys()):\n self.disappeared[objectId] += 1\n if self.disappeared[objectId] > self.maxNumberOfDisapper:\n self.deregister(objectId)\n return self.objects\n currentCentroids = np.zeros((len(rectange), 2), dtype='int')\n for i, (startX, startY, height, width) in enumerate(rectange):\n X_centroid = int((2 * startX + height) / 2.0)\n Y_centroid = int((2 * startY + width) / 2.0)\n currentCentroids[i] = X_centroid, Y_centroid\n if len(self.objects) == 0:\n for i in range(0, len(currentCentroids)):\n self.register(currentCentroids[i])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n else:\n objectIDs = list(self.objects.keys())\n objectsValue = list(self.objects.values())\n objectCentroids = [list(value['centroid']) for value in\n objectsValue]\n eculideanDistance = distance.cdist(np.array(objectCentroids),\n currentCentroids)\n rows = eculideanDistance.min(axis=1).argsort()\n cols = eculideanDistance.argmin(axis=1)[rows]\n usedRows = set()\n usedCols = set()\n for row, col in zip(rows, cols):\n if row in usedRows or col in usedCols:\n continue\n objectID = objectIDs[row]\n self.objects[objectID]['centroid'] = currentCentroids[col]\n self.disappeared[objectID] = 0\n usedRows.add(row)\n usedCols.add(col)\n unusedRows = set(range(0, eculideanDistance.shape[0])).difference(\n usedRows)\n unusedCols = set(range(0, eculideanDistance.shape[1])).difference(\n usedCols)\n if len(np.array(objectCentroids)) >= len(currentCentroids):\n for row in unusedRows:\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n if self.disappeared[objectID] > self.maxNumberOfDisapper:\n self.deregister(objectID)\n else:\n for col in unusedCols:\n self.register(currentCentroids[col])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'\n ] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n return self.objects\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass CentroidAlgorithm:\n <function token>\n <function token>\n <function token>\n <function token>\n\n def deregister(self, objectID):\n print('object deregistered at ', objectID, ': ', self.objects[objectID]\n )\n del self.objects[objectID]\n del self.disappeared[objectID]\n\n def update(self, rectange, EntryTime):\n if len(rectange) == 0:\n for objectId in list(self.objects.keys()):\n self.disappeared[objectId] += 1\n if self.disappeared[objectId] > self.maxNumberOfDisapper:\n self.deregister(objectId)\n return self.objects\n currentCentroids = np.zeros((len(rectange), 2), dtype='int')\n for i, (startX, startY, height, width) in enumerate(rectange):\n X_centroid = int((2 * startX + height) / 2.0)\n Y_centroid = int((2 * startY + width) / 2.0)\n currentCentroids[i] = X_centroid, Y_centroid\n if len(self.objects) == 0:\n for i in range(0, len(currentCentroids)):\n self.register(currentCentroids[i])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n else:\n objectIDs = list(self.objects.keys())\n objectsValue = list(self.objects.values())\n objectCentroids = [list(value['centroid']) for value in\n objectsValue]\n eculideanDistance = distance.cdist(np.array(objectCentroids),\n currentCentroids)\n rows = eculideanDistance.min(axis=1).argsort()\n cols = eculideanDistance.argmin(axis=1)[rows]\n usedRows = set()\n usedCols = set()\n for row, col in zip(rows, cols):\n if row in usedRows or col in usedCols:\n continue\n objectID = objectIDs[row]\n self.objects[objectID]['centroid'] = currentCentroids[col]\n self.disappeared[objectID] = 0\n usedRows.add(row)\n usedCols.add(col)\n unusedRows = set(range(0, eculideanDistance.shape[0])).difference(\n usedRows)\n unusedCols = set(range(0, eculideanDistance.shape[1])).difference(\n usedCols)\n if len(np.array(objectCentroids)) >= len(currentCentroids):\n for row in unusedRows:\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n if self.disappeared[objectID] > self.maxNumberOfDisapper:\n self.deregister(objectID)\n else:\n for col in unusedCols:\n self.register(currentCentroids[col])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'\n ] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n return self.objects\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass CentroidAlgorithm:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update(self, rectange, EntryTime):\n if len(rectange) == 0:\n for objectId in list(self.objects.keys()):\n self.disappeared[objectId] += 1\n if self.disappeared[objectId] > self.maxNumberOfDisapper:\n self.deregister(objectId)\n return self.objects\n currentCentroids = np.zeros((len(rectange), 2), dtype='int')\n for i, (startX, startY, height, width) in enumerate(rectange):\n X_centroid = int((2 * startX + height) / 2.0)\n Y_centroid = int((2 * startY + width) / 2.0)\n currentCentroids[i] = X_centroid, Y_centroid\n if len(self.objects) == 0:\n for i in range(0, len(currentCentroids)):\n self.register(currentCentroids[i])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n else:\n objectIDs = list(self.objects.keys())\n objectsValue = list(self.objects.values())\n objectCentroids = [list(value['centroid']) for value in\n objectsValue]\n eculideanDistance = distance.cdist(np.array(objectCentroids),\n currentCentroids)\n rows = eculideanDistance.min(axis=1).argsort()\n cols = eculideanDistance.argmin(axis=1)[rows]\n usedRows = set()\n usedCols = set()\n for row, col in zip(rows, cols):\n if row in usedRows or col in usedCols:\n continue\n objectID = objectIDs[row]\n self.objects[objectID]['centroid'] = currentCentroids[col]\n self.disappeared[objectID] = 0\n usedRows.add(row)\n usedCols.add(col)\n unusedRows = set(range(0, eculideanDistance.shape[0])).difference(\n usedRows)\n unusedCols = set(range(0, eculideanDistance.shape[1])).difference(\n usedCols)\n if len(np.array(objectCentroids)) >= len(currentCentroids):\n for row in unusedRows:\n objectID = objectIDs[row]\n self.disappeared[objectID] += 1\n if self.disappeared[objectID] > self.maxNumberOfDisapper:\n self.deregister(objectID)\n else:\n for col in unusedCols:\n self.register(currentCentroids[col])\n if self.objects[self.nextObject - 1]['isUpdated'] == False:\n self.objects[self.nextObject - 1]['entrytime'\n ] = EntryTime\n self.objects[self.nextObject - 1]['isUpdated'] = True\n return self.objects\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass CentroidAlgorithm:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n\n def event(self):\n return datetime.now() - self.starttimer\n\n\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n\n def startTimer(self):\n self.starttimer = datetime.now()\n <function token>\n\n\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n\n\nclass EventCapture:\n\n def __init__(self):\n self.starttimer = None\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n\n\nclass EventCapture:\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n"
] | false |
99,590 |
12d217e79c096774b16408f47c8a40866d6522b6
|
#Declaring dependencies
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from Gender_Age_DF import Gender_Age_2017
from Gender_Age_DF import Gender_Age_2016
from Gender_Age_DF import Gender_Age_2015
from Gender_Age_DF import Gender_Age_2014
from Gender_Age_DF import Gender_Age_2013
from Gender_Age_DF import Gender_Age_2012
from Gender_Age_DF import Gender_Age_2011
#Loading each year's DataFrame from the Gender_Age_DF module
Gender_Age_2017_DF = Gender_Age_2017()
Gender_Age_2016_DF = Gender_Age_2016()
Gender_Age_2015_DF = Gender_Age_2015()
Gender_Age_2014_DF = Gender_Age_2014()
Gender_Age_2013_DF = Gender_Age_2013()
Gender_Age_2012_DF = Gender_Age_2012()
Gender_Age_2011_DF = Gender_Age_2011()
#Gender_Age_2017_DF ("Entertainmnet" is spelled this way throughout so the filters match the labels in the source data)
Comp2017 = Gender_Age_2017_DF.loc[(Gender_Age_2017_DF["Industry_x"]=="Computer and Math")]
Eng2017 = Gender_Age_2017_DF.loc[(Gender_Age_2017_DF["Industry_x"]=="Engineering")]
Ent2017 = Gender_Age_2017_DF.loc[(Gender_Age_2017_DF["Industry_x"]=="Entertainmnet")]
HC2017 = Gender_Age_2017_DF.loc[(Gender_Age_2017_DF["Industry_x"]=="Healthcare")]
Serv2017 = Gender_Age_2017_DF.loc[(Gender_Age_2017_DF["Industry_x"]=="Service")]
#Gender_Age_2016_DF
Comp2016 = Gender_Age_2016_DF.loc[(Gender_Age_2016_DF["Industry_x"]=="Computer and Math")]
Eng2016 = Gender_Age_2016_DF.loc[(Gender_Age_2016_DF["Industry_x"]=="Engineering")]
Ent2016 = Gender_Age_2016_DF.loc[(Gender_Age_2016_DF["Industry_x"]=="Entertainmnet")]
HC2016 = Gender_Age_2016_DF.loc[(Gender_Age_2016_DF["Industry_x"]=="Healthcare")]
Serv2016 = Gender_Age_2016_DF.loc[(Gender_Age_2016_DF["Industry_x"]=="Service")]
#Gender_Age_2015_DF
Comp2015 = Gender_Age_2015_DF.loc[(Gender_Age_2015_DF["Industry_x"]=="Computer and Math")]
Eng2015 = Gender_Age_2015_DF.loc[(Gender_Age_2015_DF["Industry_x"]=="Engineering")]
Ent2015 = Gender_Age_2015_DF.loc[(Gender_Age_2015_DF["Industry_x"]=="Entertainmnet")]
HC2015 = Gender_Age_2015_DF.loc[(Gender_Age_2015_DF["Industry_x"]=="Healthcare")]
Serv2015 = Gender_Age_2015_DF.loc[(Gender_Age_2015_DF["Industry_x"]=="Service")]
#Gender_Age_2014_DF
Comp2014 = Gender_Age_2014_DF.loc[(Gender_Age_2014_DF["Industry_x"]=="Computer and Math")]
Eng2014 = Gender_Age_2014_DF.loc[(Gender_Age_2014_DF["Industry_x"]=="Engineering")]
Ent2014 = Gender_Age_2014_DF.loc[(Gender_Age_2014_DF["Industry_x"]=="Entertainmnet")]
HC2014 = Gender_Age_2014_DF.loc[(Gender_Age_2014_DF["Industry_x"]=="Healthcare")]
Serv2014 = Gender_Age_2014_DF.loc[(Gender_Age_2014_DF["Industry_x"]=="Service")]
#Gender_Age_2013_DF
Comp2013 = Gender_Age_2013_DF.loc[(Gender_Age_2013_DF["Industry_x"]=="Computer and Math")]
Eng2013 = Gender_Age_2013_DF.loc[(Gender_Age_2013_DF["Industry_x"]=="Engineering")]
Ent2013 = Gender_Age_2013_DF.loc[(Gender_Age_2013_DF["Industry_x"]=="Entertainmnet")]
HC2013 = Gender_Age_2013_DF.loc[(Gender_Age_2013_DF["Industry_x"]=="Healthcare")]
Serv2013 = Gender_Age_2013_DF.loc[(Gender_Age_2013_DF["Industry_x"]=="Service")]
#Gender_Age_2012_DF
Comp2012 = Gender_Age_2012_DF.loc[(Gender_Age_2012_DF["Industry_x"]=="Computer and Math")]
Eng2012 = Gender_Age_2012_DF.loc[(Gender_Age_2012_DF["Industry_x"]=="Engineering")]
Ent2012 = Gender_Age_2012_DF.loc[(Gender_Age_2012_DF["Industry_x"]=="Entertainmnet")]
HC2012 = Gender_Age_2012_DF.loc[(Gender_Age_2012_DF["Industry_x"]=="Healthcare")]
Serv2012 = Gender_Age_2012_DF.loc[(Gender_Age_2012_DF["Industry_x"]=="Service")]
#Gender_Age_2011_DF
Comp2011 = Gender_Age_2011_DF.loc[(Gender_Age_2011_DF["Industry_x"]=="Computer and Math")]
Eng2011 = Gender_Age_2011_DF.loc[(Gender_Age_2011_DF["Industry_x"]=="Engineering")]
Ent2011 = Gender_Age_2011_DF.loc[(Gender_Age_2011_DF["Industry_x"]=="Entertainmnet")]
HC2011 = Gender_Age_2011_DF.loc[(Gender_Age_2011_DF["Industry_x"]=="Healthcare")]
Serv2011 = Gender_Age_2011_DF.loc[(Gender_Age_2011_DF["Industry_x"]=="Service")]
#Line Graph for Total # of Workers by Industry per Year
#Storing total employee values per industry
#Healthcare Total
HCTotal2017 = HC2017['Total Number of workers'].sum()
HCTotal2016 = HC2016['Total Number of workers'].sum()
HCTotal2015 = HC2015['Total Number of workers'].sum()
HCTotal2014 = HC2014['Total Number of workers'].sum()
HCTotal2013 = HC2013['Total Number of workers'].sum()
HCTotal2012 = HC2012['Total Number of workers'].sum()
HCTotal2011 = HC2011['Total Number of workers'].sum()
#Entertainment Total
EntTotal2017 = Ent2017['Total Number of workers'].sum()
EntTotal2016 = Ent2016['Total Number of workers'].sum()
EntTotal2015 = Ent2015['Total Number of workers'].sum()
EntTotal2014 = Ent2014['Total Number of workers'].sum()
EntTotal2013 = Ent2013['Total Number of workers'].sum()
EntTotal2012 = Ent2012['Total Number of workers'].sum()
EntTotal2011 = Ent2011['Total Number of workers'].sum()
#Service Total
ServTotal2017 = Serv2017['Total Number of workers'].sum()
ServTotal2016 = Serv2016['Total Number of workers'].sum()
ServTotal2015 = Serv2015['Total Number of workers'].sum()
ServTotal2014 = Serv2014['Total Number of workers'].sum()
ServTotal2013 = Serv2013['Total Number of workers'].sum()
ServTotal2012 = Serv2012['Total Number of workers'].sum()
ServTotal2011 = Serv2011['Total Number of workers'].sum()
#Computer & Math Total
Comptotal2017 = Comp2017['Total Number of workers'].sum()
Comptotal2016 = Comp2016['Total Number of workers'].sum()
Comptotal2015 = Comp2015['Total Number of workers'].sum()
Comptotal2014 = Comp2014['Total Number of workers'].sum()
Comptotal2013 = Comp2013['Total Number of workers'].sum()
Comptotal2012 = Comp2012['Total Number of workers'].sum()
Comptotal2011 = Comp2011['Total Number of workers'].sum()
#Engineering Total
Engtotal2017 = Eng2017['Total Number of workers'].sum()
Engtotal2016 = Eng2016['Total Number of workers'].sum()
Engtotal2015 = Eng2015['Total Number of workers'].sum()
Engtotal2014 = Eng2014['Total Number of workers'].sum()
Engtotal2013 = Eng2013['Total Number of workers'].sum()
Engtotal2012 = Eng2012['Total Number of workers'].sum()
Engtotal2011 = Eng2011['Total Number of workers'].sum()
year = [2011, 2012, 2013, 2014, 2015, 2016, 2017]
HC= [HCTotal2011, HCTotal2012, HCTotal2013, HCTotal2014, HCTotal2015, HCTotal2016, HCTotal2017]
Comp= [Comptotal2011, Comptotal2012, Comptotal2013, Comptotal2014, Comptotal2015, Comptotal2016, Comptotal2017]
Eng= [Engtotal2011, Engtotal2012, Engtotal2013, Engtotal2014, Engtotal2015, Engtotal2016, Engtotal2017]
Serv= [ServTotal2011, ServTotal2012, ServTotal2013, ServTotal2014, ServTotal2015, ServTotal2016, ServTotal2017]
Ent= [EntTotal2011, EntTotal2012, EntTotal2013, EntTotal2014, EntTotal2015, EntTotal2016, EntTotal2017]
#assigning values to the line graph
plt.plot(year, HC, color='blue', label="Healthcare")
plt.plot(year, Comp, color='red', label="Computer & Math")
plt.plot(year, Eng, color='green', label="Engineering")
plt.plot(year, Serv, color='yellow', label="Service")
plt.plot(year, Ent, color='purple', label="Entertainment")
plt.title("Total Employees per Industry by Year")
plt.xlabel("Year")
plt.ylabel("Toal Employees")
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.savefig("Industry Analysis")
|
[
"#Declaring Dependacies\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom Gender_Age_DF import Gender_Age_2017\nfrom Gender_Age_DF import Gender_Age_2016\nfrom Gender_Age_DF import Gender_Age_2015\nfrom Gender_Age_DF import Gender_Age_2014\nfrom Gender_Age_DF import Gender_Age_2013\nfrom Gender_Age_DF import Gender_Age_2012\nfrom Gender_Age_DF import Gender_Age_2011\n\n#Creating Global Variables to pull DataFrame into Notebook\nGender_Age_2017_DF = Gender_Age_2017()\nGender_Age_2016_DF = Gender_Age_2016()\nGender_Age_2015_DF = Gender_Age_2015()\nGender_Age_2014_DF = Gender_Age_2014()\nGender_Age_2013_DF = Gender_Age_2013()\nGender_Age_2012_DF = Gender_Age_2012()\nGender_Age_2011_DF = Gender_Age_2011()\n\n#Gender_Age_2017_DF\nComp2017 = Gender_Age_2017_DF.loc[(Gender_Age_2017_DF[\"Industry_x\"]==\"Computer and Math\")]\nEng2017 = Gender_Age_2017_DF.loc[(Gender_Age_2017_DF[\"Industry_x\"]==\"Engineering\")]\nEnt2017 = Gender_Age_2017_DF.loc[(Gender_Age_2017_DF[\"Industry_x\"]==\"Entertainmnet\")]\nHC2017 = Gender_Age_2017_DF.loc[(Gender_Age_2017_DF[\"Industry_x\"]==\"Healthcare\")]\nServ2017 = Gender_Age_2017_DF.loc[(Gender_Age_2017_DF[\"Industry_x\"]==\"Service\")]\n\n#Gender_Age_2016_DF\nComp2016 = Gender_Age_2016_DF.loc[(Gender_Age_2016_DF[\"Industry_x\"]==\"Computer and Math\")]\nEng2016= Gender_Age_2016_DF.loc[(Gender_Age_2016_DF[\"Industry_x\"]==\"Engineering\")]\nEnt2016 = Gender_Age_2016_DF.loc[(Gender_Age_2016_DF[\"Industry_x\"]==\"Entertainmnet\")]\nHC2016 = Gender_Age_2016_DF.loc[(Gender_Age_2016_DF[\"Industry_x\"]==\"Healthcare\")]\nServ2016 = Gender_Age_2016_DF.loc[(Gender_Age_2016_DF[\"Industry_x\"]==\"Service\")]\n\n#Gender_Age_2015_DF\nComp2015 = Gender_Age_2015_DF.loc[(Gender_Age_2015_DF[\"Industry_x\"]==\"Computer and Math\")]\nEng2015 = Gender_Age_2015_DF.loc[(Gender_Age_2015_DF[\"Industry_x\"]==\"Engineering\")]\nEnt2015 = Gender_Age_2015_DF.loc[(Gender_Age_2015_DF[\"Industry_x\"]==\"Entertainmnet\")]\nHC2015 = Gender_Age_2015_DF.loc[(Gender_Age_2015_DF[\"Industry_x\"]==\"Healthcare\")]\nServ2015 = Gender_Age_2015_DF.loc[(Gender_Age_2015_DF[\"Industry_x\"]==\"Service\")]\n\n#Gender_Age_2014_DF\nComp2014 = Gender_Age_2014_DF.loc[(Gender_Age_2014_DF[\"Industry_x\"]==\"Computer and Math\")]\nEng2014 = Gender_Age_2014_DF.loc[(Gender_Age_2014_DF[\"Industry_x\"]==\"Engineering\")]\nEnt2014 = Gender_Age_2014_DF.loc[(Gender_Age_2014_DF[\"Industry_x\"]==\"Entertainmnet\")]\nHC2014 = Gender_Age_2014_DF.loc[(Gender_Age_2014_DF[\"Industry_x\"]==\"Healthcare\")]\nServ2014 = Gender_Age_2014_DF.loc[(Gender_Age_2014_DF[\"Industry_x\"]==\"Service\")]\n\n#Gender_Age_2013_DF\nComp2013 = Gender_Age_2013_DF.loc[(Gender_Age_2013_DF[\"Industry_x\"]==\"Computer and Math\")]\nEng2013 = Gender_Age_2013_DF.loc[(Gender_Age_2013_DF[\"Industry_x\"]==\"Engineering\")]\nEnt2013 = Gender_Age_2013_DF.loc[(Gender_Age_2013_DF[\"Industry_x\"]==\"Entertainmnet\")]\nHC2013 = Gender_Age_2013_DF.loc[(Gender_Age_2013_DF[\"Industry_x\"]==\"Healthcare\")]\nServ2013 = Gender_Age_2013_DF.loc[(Gender_Age_2013_DF[\"Industry_x\"]==\"Service\")]\n\n#Gender_Age_2012_DF\nComp2012 = Gender_Age_2012_DF.loc[(Gender_Age_2012_DF[\"Industry_x\"]==\"Computer and Math\")]\nEng2012 = Gender_Age_2012_DF.loc[(Gender_Age_2012_DF[\"Industry_x\"]==\"Engineering\")]\nEnt2012 = Gender_Age_2012_DF.loc[(Gender_Age_2012_DF[\"Industry_x\"]==\"Entertainmnet\")]\nHC2012 = Gender_Age_2012_DF.loc[(Gender_Age_2012_DF[\"Industry_x\"]==\"Healthcare\")]\nServ2012 = 
Gender_Age_2012_DF.loc[(Gender_Age_2012_DF[\"Industry_x\"]==\"Service\")]\n\n#Gender_Age_2011_DF\nComp2011 = Gender_Age_2011_DF.loc[(Gender_Age_2011_DF[\"Industry_x\"]==\"Computer and Math\")]\nEng2011 = Gender_Age_2011_DF.loc[(Gender_Age_2011_DF[\"Industry_x\"]==\"Engineering\")]\nEnt2011 = Gender_Age_2011_DF.loc[(Gender_Age_2011_DF[\"Industry_x\"]==\"Entertainmnet\")]\nHC2011 = Gender_Age_2011_DF.loc[(Gender_Age_2011_DF[\"Industry_x\"]==\"Healthcare\")]\nServ2011 = Gender_Age_2011_DF.loc[(Gender_Age_2011_DF[\"Industry_x\"]==\"Service\")]\n\n\n#Line Graph for Total # of Workers by Industry per Year\n#Storing total employee values\n\nTotal_Emp = []\n\n#HealtheCare Total\nHCTotal2017 = HC2017['Total Number of workers'].sum()\nHCTotal2016 = HC2016['Total Number of workers'].sum()\nHCTotal2015 = HC2015['Total Number of workers'].sum()\nHCTotal2014 = HC2014['Total Number of workers'].sum()\nHCTotal2013 = HC2013['Total Number of workers'].sum()\nHCTotal2012 = HC2012['Total Number of workers'].sum()\nHCTotal2011 = HC2011['Total Number of workers'].sum()\n\n#Entertainment Total\nEntTotal2017 = Ent2017['Total Number of workers'].sum()\nEntTotal2016 = Ent2016['Total Number of workers'].sum()\nEntTotal2015 = Ent2015['Total Number of workers'].sum()\nEntTotal2014 = Ent2014['Total Number of workers'].sum()\nEntTotal2013 = Ent2013['Total Number of workers'].sum()\nEntTotal2012 = Ent2012['Total Number of workers'].sum()\nEntTotal2011 = Ent2011['Total Number of workers'].sum()\n\n#Service Total\nServTotal2017 = Serv2017['Total Number of workers'].sum()\nServTotal2016 = Serv2016['Total Number of workers'].sum()\nServTotal2015 = Serv2015['Total Number of workers'].sum()\nServTotal2014 = Serv2014['Total Number of workers'].sum()\nServTotal2013 = Serv2013['Total Number of workers'].sum()\nServTotal2012 = Serv2012['Total Number of workers'].sum()\nServTotal2011 = Serv2011['Total Number of workers'].sum()\n\n#Computer & Math Total\nComptotal2017 = Comp2017['Total Number of workers'].sum()\nComptotal2016 = Comp2016['Total Number of workers'].sum()\nComptotal2015 = Comp2015['Total Number of workers'].sum()\nComptotal2014 = Comp2014['Total Number of workers'].sum()\nComptotal2013 = Comp2013['Total Number of workers'].sum()\nComptotal2012 = Comp2012['Total Number of workers'].sum()\nComptotal2011 = Comp2011['Total Number of workers'].sum()\n\n#Engineering Total\nEngtotal2017 = Eng2017['Total Number of workers'].sum()\nEngtotal2016 = Eng2016['Total Number of workers'].sum()\nEngtotal2015 = Eng2015['Total Number of workers'].sum()\nEngtotal2014 = Eng2014['Total Number of workers'].sum()\nEngtotal2013 = Eng2013['Total Number of workers'].sum()\nEngtotal2012 = Eng2012['Total Number of workers'].sum()\nEngtotal2011 = Eng2011['Total Number of workers'].sum()\n\nyear = [2011, 2012, 2013, 2014, 2015, 2016, 2017]\nHC= [HCTotal2011, HCTotal2012, HCTotal2013, HCTotal2014, HCTotal2015, HCTotal2016, HCTotal2017]\nComp= [Comptotal2011, Comptotal2012, Comptotal2013, Comptotal2014, Comptotal2015, Comptotal2016, Comptotal2017]\nEng= [Engtotal2011, Engtotal2012, Engtotal2013, Engtotal2014, Engtotal2015, Engtotal2016, Engtotal2017]\nServ= [ServTotal2011, ServTotal2012, ServTotal2013, ServTotal2014, ServTotal2015, ServTotal2016, ServTotal2017]\nEnt= [EntTotal2011, EntTotal2012, EntTotal2013, EntTotal2014, EntTotal2015, EntTotal2016, EntTotal2017]\n\n#assinging values to line graph\nplt.plot(year, HC, color='blue', label=\"Healthcare\")\nplt.plot(year, Comp, color='red', label=\"Computer & Math\")\nplt.plot(year, Eng, color='green', 
label=\"Engineering\")\nplt.plot(year, Serv, color='yellow', label=\"Service\")\nplt.plot(year, Ent, color='purple', label=\"Entertainment\")\n\nplt.title(\"Total Employees per Industry by Year\")\nplt.xlabel(\"Year\")\nplt.ylabel(\"Toal Employees\") \nplt.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\nplt.savefig(\"Industry Analysis\")\n",
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom Gender_Age_DF import Gender_Age_2017\nfrom Gender_Age_DF import Gender_Age_2016\nfrom Gender_Age_DF import Gender_Age_2015\nfrom Gender_Age_DF import Gender_Age_2014\nfrom Gender_Age_DF import Gender_Age_2013\nfrom Gender_Age_DF import Gender_Age_2012\nfrom Gender_Age_DF import Gender_Age_2011\nGender_Age_2017_DF = Gender_Age_2017()\nGender_Age_2016_DF = Gender_Age_2016()\nGender_Age_2015_DF = Gender_Age_2015()\nGender_Age_2014_DF = Gender_Age_2014()\nGender_Age_2013_DF = Gender_Age_2013()\nGender_Age_2012_DF = Gender_Age_2012()\nGender_Age_2011_DF = Gender_Age_2011()\nComp2017 = Gender_Age_2017_DF.loc[Gender_Age_2017_DF['Industry_x'] ==\n 'Computer and Math']\nEng2017 = Gender_Age_2017_DF.loc[Gender_Age_2017_DF['Industry_x'] ==\n 'Engineering']\nEnt2017 = Gender_Age_2017_DF.loc[Gender_Age_2017_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2017 = Gender_Age_2017_DF.loc[Gender_Age_2017_DF['Industry_x'] ==\n 'Healthcare']\nServ2017 = Gender_Age_2017_DF.loc[Gender_Age_2017_DF['Industry_x'] == 'Service'\n ]\nComp2016 = Gender_Age_2016_DF.loc[Gender_Age_2016_DF['Industry_x'] ==\n 'Computer and Math']\nEng2016 = Gender_Age_2016_DF.loc[Gender_Age_2016_DF['Industry_x'] ==\n 'Engineering']\nEnt2016 = Gender_Age_2016_DF.loc[Gender_Age_2016_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2016 = Gender_Age_2016_DF.loc[Gender_Age_2016_DF['Industry_x'] ==\n 'Healthcare']\nServ2016 = Gender_Age_2016_DF.loc[Gender_Age_2016_DF['Industry_x'] == 'Service'\n ]\nComp2015 = Gender_Age_2015_DF.loc[Gender_Age_2015_DF['Industry_x'] ==\n 'Computer and Math']\nEng2015 = Gender_Age_2015_DF.loc[Gender_Age_2015_DF['Industry_x'] ==\n 'Engineering']\nEnt2015 = Gender_Age_2015_DF.loc[Gender_Age_2015_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2015 = Gender_Age_2015_DF.loc[Gender_Age_2015_DF['Industry_x'] ==\n 'Healthcare']\nServ2015 = Gender_Age_2015_DF.loc[Gender_Age_2015_DF['Industry_x'] == 'Service'\n ]\nComp2014 = Gender_Age_2014_DF.loc[Gender_Age_2014_DF['Industry_x'] ==\n 'Computer and Math']\nEng2014 = Gender_Age_2014_DF.loc[Gender_Age_2014_DF['Industry_x'] ==\n 'Engineering']\nEnt2014 = Gender_Age_2014_DF.loc[Gender_Age_2014_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2014 = Gender_Age_2014_DF.loc[Gender_Age_2014_DF['Industry_x'] ==\n 'Healthcare']\nServ2014 = Gender_Age_2014_DF.loc[Gender_Age_2014_DF['Industry_x'] == 'Service'\n ]\nComp2013 = Gender_Age_2013_DF.loc[Gender_Age_2013_DF['Industry_x'] ==\n 'Computer and Math']\nEng2013 = Gender_Age_2013_DF.loc[Gender_Age_2013_DF['Industry_x'] ==\n 'Engineering']\nEnt2013 = Gender_Age_2013_DF.loc[Gender_Age_2013_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2013 = Gender_Age_2013_DF.loc[Gender_Age_2013_DF['Industry_x'] ==\n 'Healthcare']\nServ2013 = Gender_Age_2013_DF.loc[Gender_Age_2013_DF['Industry_x'] == 'Service'\n ]\nComp2012 = Gender_Age_2012_DF.loc[Gender_Age_2012_DF['Industry_x'] ==\n 'Computer and Math']\nEng2012 = Gender_Age_2012_DF.loc[Gender_Age_2012_DF['Industry_x'] ==\n 'Engineering']\nEnt2012 = Gender_Age_2012_DF.loc[Gender_Age_2012_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2012 = Gender_Age_2012_DF.loc[Gender_Age_2012_DF['Industry_x'] ==\n 'Healthcare']\nServ2012 = Gender_Age_2012_DF.loc[Gender_Age_2012_DF['Industry_x'] == 'Service'\n ]\nComp2011 = Gender_Age_2011_DF.loc[Gender_Age_2011_DF['Industry_x'] ==\n 'Computer and Math']\nEng2011 = Gender_Age_2011_DF.loc[Gender_Age_2011_DF['Industry_x'] ==\n 'Engineering']\nEnt2011 = Gender_Age_2011_DF.loc[Gender_Age_2011_DF['Industry_x'] ==\n 
'Entertainmnet']\nHC2011 = Gender_Age_2011_DF.loc[Gender_Age_2011_DF['Industry_x'] ==\n 'Healthcare']\nServ2011 = Gender_Age_2011_DF.loc[Gender_Age_2011_DF['Industry_x'] == 'Service'\n ]\nTotal_Emp = []\nHCTotal2017 = HC2017['Total Number of workers'].sum()\nHCTotal2016 = HC2016['Total Number of workers'].sum()\nHCTotal2015 = HC2015['Total Number of workers'].sum()\nHCTotal2014 = HC2014['Total Number of workers'].sum()\nHCTotal2013 = HC2013['Total Number of workers'].sum()\nHCTotal2012 = HC2012['Total Number of workers'].sum()\nHCTotal2011 = HC2011['Total Number of workers'].sum()\nEntTotal2017 = Ent2017['Total Number of workers'].sum()\nEntTotal2016 = Ent2016['Total Number of workers'].sum()\nEntTotal2015 = Ent2015['Total Number of workers'].sum()\nEntTotal2014 = Ent2014['Total Number of workers'].sum()\nEntTotal2013 = Ent2013['Total Number of workers'].sum()\nEntTotal2012 = Ent2012['Total Number of workers'].sum()\nEntTotal2011 = Ent2011['Total Number of workers'].sum()\nServTotal2017 = Serv2017['Total Number of workers'].sum()\nServTotal2016 = Serv2016['Total Number of workers'].sum()\nServTotal2015 = Serv2015['Total Number of workers'].sum()\nServTotal2014 = Serv2014['Total Number of workers'].sum()\nServTotal2013 = Serv2013['Total Number of workers'].sum()\nServTotal2012 = Serv2012['Total Number of workers'].sum()\nServTotal2011 = Serv2011['Total Number of workers'].sum()\nComptotal2017 = Comp2017['Total Number of workers'].sum()\nComptotal2016 = Comp2016['Total Number of workers'].sum()\nComptotal2015 = Comp2015['Total Number of workers'].sum()\nComptotal2014 = Comp2014['Total Number of workers'].sum()\nComptotal2013 = Comp2013['Total Number of workers'].sum()\nComptotal2012 = Comp2012['Total Number of workers'].sum()\nComptotal2011 = Comp2011['Total Number of workers'].sum()\nEngtotal2017 = Eng2017['Total Number of workers'].sum()\nEngtotal2016 = Eng2016['Total Number of workers'].sum()\nEngtotal2015 = Eng2015['Total Number of workers'].sum()\nEngtotal2014 = Eng2014['Total Number of workers'].sum()\nEngtotal2013 = Eng2013['Total Number of workers'].sum()\nEngtotal2012 = Eng2012['Total Number of workers'].sum()\nEngtotal2011 = Eng2011['Total Number of workers'].sum()\nyear = [2011, 2012, 2013, 2014, 2015, 2016, 2017]\nHC = [HCTotal2011, HCTotal2012, HCTotal2013, HCTotal2014, HCTotal2015,\n HCTotal2016, HCTotal2017]\nComp = [Comptotal2011, Comptotal2012, Comptotal2013, Comptotal2014,\n Comptotal2015, Comptotal2016, Comptotal2017]\nEng = [Engtotal2011, Engtotal2012, Engtotal2013, Engtotal2014, Engtotal2015,\n Engtotal2016, Engtotal2017]\nServ = [ServTotal2011, ServTotal2012, ServTotal2013, ServTotal2014,\n ServTotal2015, ServTotal2016, ServTotal2017]\nEnt = [EntTotal2011, EntTotal2012, EntTotal2013, EntTotal2014, EntTotal2015,\n EntTotal2016, EntTotal2017]\nplt.plot(year, HC, color='blue', label='Healthcare')\nplt.plot(year, Comp, color='red', label='Computer & Math')\nplt.plot(year, Eng, color='green', label='Engineering')\nplt.plot(year, Serv, color='yellow', label='Service')\nplt.plot(year, Ent, color='purple', label='Entertainment')\nplt.title('Total Employees per Industry by Year')\nplt.xlabel('Year')\nplt.ylabel('Toal Employees')\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nplt.savefig('Industry Analysis')\n",
"<import token>\nGender_Age_2017_DF = Gender_Age_2017()\nGender_Age_2016_DF = Gender_Age_2016()\nGender_Age_2015_DF = Gender_Age_2015()\nGender_Age_2014_DF = Gender_Age_2014()\nGender_Age_2013_DF = Gender_Age_2013()\nGender_Age_2012_DF = Gender_Age_2012()\nGender_Age_2011_DF = Gender_Age_2011()\nComp2017 = Gender_Age_2017_DF.loc[Gender_Age_2017_DF['Industry_x'] ==\n 'Computer and Math']\nEng2017 = Gender_Age_2017_DF.loc[Gender_Age_2017_DF['Industry_x'] ==\n 'Engineering']\nEnt2017 = Gender_Age_2017_DF.loc[Gender_Age_2017_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2017 = Gender_Age_2017_DF.loc[Gender_Age_2017_DF['Industry_x'] ==\n 'Healthcare']\nServ2017 = Gender_Age_2017_DF.loc[Gender_Age_2017_DF['Industry_x'] == 'Service'\n ]\nComp2016 = Gender_Age_2016_DF.loc[Gender_Age_2016_DF['Industry_x'] ==\n 'Computer and Math']\nEng2016 = Gender_Age_2016_DF.loc[Gender_Age_2016_DF['Industry_x'] ==\n 'Engineering']\nEnt2016 = Gender_Age_2016_DF.loc[Gender_Age_2016_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2016 = Gender_Age_2016_DF.loc[Gender_Age_2016_DF['Industry_x'] ==\n 'Healthcare']\nServ2016 = Gender_Age_2016_DF.loc[Gender_Age_2016_DF['Industry_x'] == 'Service'\n ]\nComp2015 = Gender_Age_2015_DF.loc[Gender_Age_2015_DF['Industry_x'] ==\n 'Computer and Math']\nEng2015 = Gender_Age_2015_DF.loc[Gender_Age_2015_DF['Industry_x'] ==\n 'Engineering']\nEnt2015 = Gender_Age_2015_DF.loc[Gender_Age_2015_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2015 = Gender_Age_2015_DF.loc[Gender_Age_2015_DF['Industry_x'] ==\n 'Healthcare']\nServ2015 = Gender_Age_2015_DF.loc[Gender_Age_2015_DF['Industry_x'] == 'Service'\n ]\nComp2014 = Gender_Age_2014_DF.loc[Gender_Age_2014_DF['Industry_x'] ==\n 'Computer and Math']\nEng2014 = Gender_Age_2014_DF.loc[Gender_Age_2014_DF['Industry_x'] ==\n 'Engineering']\nEnt2014 = Gender_Age_2014_DF.loc[Gender_Age_2014_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2014 = Gender_Age_2014_DF.loc[Gender_Age_2014_DF['Industry_x'] ==\n 'Healthcare']\nServ2014 = Gender_Age_2014_DF.loc[Gender_Age_2014_DF['Industry_x'] == 'Service'\n ]\nComp2013 = Gender_Age_2013_DF.loc[Gender_Age_2013_DF['Industry_x'] ==\n 'Computer and Math']\nEng2013 = Gender_Age_2013_DF.loc[Gender_Age_2013_DF['Industry_x'] ==\n 'Engineering']\nEnt2013 = Gender_Age_2013_DF.loc[Gender_Age_2013_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2013 = Gender_Age_2013_DF.loc[Gender_Age_2013_DF['Industry_x'] ==\n 'Healthcare']\nServ2013 = Gender_Age_2013_DF.loc[Gender_Age_2013_DF['Industry_x'] == 'Service'\n ]\nComp2012 = Gender_Age_2012_DF.loc[Gender_Age_2012_DF['Industry_x'] ==\n 'Computer and Math']\nEng2012 = Gender_Age_2012_DF.loc[Gender_Age_2012_DF['Industry_x'] ==\n 'Engineering']\nEnt2012 = Gender_Age_2012_DF.loc[Gender_Age_2012_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2012 = Gender_Age_2012_DF.loc[Gender_Age_2012_DF['Industry_x'] ==\n 'Healthcare']\nServ2012 = Gender_Age_2012_DF.loc[Gender_Age_2012_DF['Industry_x'] == 'Service'\n ]\nComp2011 = Gender_Age_2011_DF.loc[Gender_Age_2011_DF['Industry_x'] ==\n 'Computer and Math']\nEng2011 = Gender_Age_2011_DF.loc[Gender_Age_2011_DF['Industry_x'] ==\n 'Engineering']\nEnt2011 = Gender_Age_2011_DF.loc[Gender_Age_2011_DF['Industry_x'] ==\n 'Entertainmnet']\nHC2011 = Gender_Age_2011_DF.loc[Gender_Age_2011_DF['Industry_x'] ==\n 'Healthcare']\nServ2011 = Gender_Age_2011_DF.loc[Gender_Age_2011_DF['Industry_x'] == 'Service'\n ]\nTotal_Emp = []\nHCTotal2017 = HC2017['Total Number of workers'].sum()\nHCTotal2016 = HC2016['Total Number of workers'].sum()\nHCTotal2015 = HC2015['Total Number of 
workers'].sum()\nHCTotal2014 = HC2014['Total Number of workers'].sum()\nHCTotal2013 = HC2013['Total Number of workers'].sum()\nHCTotal2012 = HC2012['Total Number of workers'].sum()\nHCTotal2011 = HC2011['Total Number of workers'].sum()\nEntTotal2017 = Ent2017['Total Number of workers'].sum()\nEntTotal2016 = Ent2016['Total Number of workers'].sum()\nEntTotal2015 = Ent2015['Total Number of workers'].sum()\nEntTotal2014 = Ent2014['Total Number of workers'].sum()\nEntTotal2013 = Ent2013['Total Number of workers'].sum()\nEntTotal2012 = Ent2012['Total Number of workers'].sum()\nEntTotal2011 = Ent2011['Total Number of workers'].sum()\nServTotal2017 = Serv2017['Total Number of workers'].sum()\nServTotal2016 = Serv2016['Total Number of workers'].sum()\nServTotal2015 = Serv2015['Total Number of workers'].sum()\nServTotal2014 = Serv2014['Total Number of workers'].sum()\nServTotal2013 = Serv2013['Total Number of workers'].sum()\nServTotal2012 = Serv2012['Total Number of workers'].sum()\nServTotal2011 = Serv2011['Total Number of workers'].sum()\nComptotal2017 = Comp2017['Total Number of workers'].sum()\nComptotal2016 = Comp2016['Total Number of workers'].sum()\nComptotal2015 = Comp2015['Total Number of workers'].sum()\nComptotal2014 = Comp2014['Total Number of workers'].sum()\nComptotal2013 = Comp2013['Total Number of workers'].sum()\nComptotal2012 = Comp2012['Total Number of workers'].sum()\nComptotal2011 = Comp2011['Total Number of workers'].sum()\nEngtotal2017 = Eng2017['Total Number of workers'].sum()\nEngtotal2016 = Eng2016['Total Number of workers'].sum()\nEngtotal2015 = Eng2015['Total Number of workers'].sum()\nEngtotal2014 = Eng2014['Total Number of workers'].sum()\nEngtotal2013 = Eng2013['Total Number of workers'].sum()\nEngtotal2012 = Eng2012['Total Number of workers'].sum()\nEngtotal2011 = Eng2011['Total Number of workers'].sum()\nyear = [2011, 2012, 2013, 2014, 2015, 2016, 2017]\nHC = [HCTotal2011, HCTotal2012, HCTotal2013, HCTotal2014, HCTotal2015,\n HCTotal2016, HCTotal2017]\nComp = [Comptotal2011, Comptotal2012, Comptotal2013, Comptotal2014,\n Comptotal2015, Comptotal2016, Comptotal2017]\nEng = [Engtotal2011, Engtotal2012, Engtotal2013, Engtotal2014, Engtotal2015,\n Engtotal2016, Engtotal2017]\nServ = [ServTotal2011, ServTotal2012, ServTotal2013, ServTotal2014,\n ServTotal2015, ServTotal2016, ServTotal2017]\nEnt = [EntTotal2011, EntTotal2012, EntTotal2013, EntTotal2014, EntTotal2015,\n EntTotal2016, EntTotal2017]\nplt.plot(year, HC, color='blue', label='Healthcare')\nplt.plot(year, Comp, color='red', label='Computer & Math')\nplt.plot(year, Eng, color='green', label='Engineering')\nplt.plot(year, Serv, color='yellow', label='Service')\nplt.plot(year, Ent, color='purple', label='Entertainment')\nplt.title('Total Employees per Industry by Year')\nplt.xlabel('Year')\nplt.ylabel('Toal Employees')\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nplt.savefig('Industry Analysis')\n",
"<import token>\n<assignment token>\nplt.plot(year, HC, color='blue', label='Healthcare')\nplt.plot(year, Comp, color='red', label='Computer & Math')\nplt.plot(year, Eng, color='green', label='Engineering')\nplt.plot(year, Serv, color='yellow', label='Service')\nplt.plot(year, Ent, color='purple', label='Entertainment')\nplt.title('Total Employees per Industry by Year')\nplt.xlabel('Year')\nplt.ylabel('Toal Employees')\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nplt.savefig('Industry Analysis')\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
99,591 |
b77721d6baf9cb82539e6aae6e1179601e8347d9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Timothy Dozat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import io
import re
import codecs
from collections import Counter
import numpy as np
import tensorflow as tf
from nparser import Configurable, Multibucket
from nparser.vocabs.base_vocab import BaseVocab
from nparser.misc.bucketer import Bucketer
__all__ = ['Trainset', 'Parseset']
#***************************************************************
class Dataset(Configurable):
""" """
#=============================================================
def __init__(self, vocabs, *args, **kwargs):
""" """
nlp_model = kwargs.pop('nlp_model', None)
if "parse_files" in kwargs and isinstance(kwargs["parse_files"],io.StringIO): ### SPECIAL CASE - PARSING StringIO
self.preopen_parse_file=kwargs.pop("parse_files") #This doesn't really play well with the configparser thing
else:
self.preopen_parse_file=None
super(Dataset, self).__init__(*args, **kwargs)
self._vocabs = vocabs
self._multibuckets = [Multibucket.from_configurable(vocab, name='%s-%s'%(self.name, vocab.name)) for vocab in self.vocabs]
self._metadata=[]
if nlp_model is not None:
self._nlp_model = nlp_model.from_configurable(self, name=self.name)
else:
self._nlp_model = None
with Bucketer.from_configurable(self, self.n_buckets, name='bucketer-%s'%self.name) as bucketer:
splits = bucketer.compute_splits(len(sent) for sent,metadata in self.iterfiles())
for i in range(len(splits)):
splits[i] += 1
for multibucket, vocab in self.iteritems():
multibucket.open(splits, depth=vocab.depth)
for sent,metadata in self.iterfiles():
#mycode begins
#words = [line[1] for line in sent]
#uposList = [line[3] for line in sent]
#xposList = [line[4].split('|',1)[0][5:] for line in sent]
#morpList = [line[5] for line in sent]
#newUposList = rule_based_parser(words,uposList,xposList,morpList)
#for i,line in enumerate(sent):
# line[3] = newUposList[i]
#print(sent)
#mycode ends
self._metadata.append(metadata)
for multibucket, vocab in self.iteritems():
tokens = [line[vocab.conll_idx] for line in sent]
idxs = [vocab.ROOT] + [vocab.index(token) for token in tokens]
multibucket.add(idxs, tokens)
for multibucket in self:
multibucket.close()
self._multibucket = Multibucket.from_dataset(self)
return
#=============================================================
def __call__(self, moving_params=None):
""" """
return self._nlp_model(self.vocabs, moving_params=moving_params)
#=============================================================
def iterfiles(self):
""" """
#0 1 2 3 4 5 6 7 8 9
ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC=range(10)
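    # one integer constant per CoNLL-U column of a tab-separated token line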
if isinstance(self.preopen_parse_file,io.StringIO): #Go from here
data_files=[self.preopen_parse_file]
else:
data_files=self.data_files
for data_file in data_files:
if isinstance(data_file,str):
f=codecs.open(data_file, encoding='utf-8', errors='ignore')
else:
f=data_file
buff = []
metadata = {"comments":[],"miscfield":[],"feats":[],"multiwordtokens":[]}
for line in f:
line = line.strip()
if line:
if not line.startswith('#'):
if not re.match('^[0-9]+[-.][0-9]+\t', line):
cols=line.split("\t")
metadata["miscfield"].append(cols[MISC])
metadata["feats"].append(cols[FEATS])
buff.append(cols)
elif re.match('^[0-9]+[-][0-9]+\t', line): #multiword token
cols=line.split("\t")
beg,end=cols[ID].split("-")
metadata["multiwordtokens"].append((int(beg),int(end),cols[FORM]))
else:
metadata["comments"].append(line)
elif buff:
yield buff, metadata
buff = []
metadata = {"comments":[],"miscfield":[],"feats":[],"multiwordtokens":[]}
yield buff, metadata
if isinstance(data_file,str):
f.close()
else:
f.seek(0) #rewind for new reading
#=============================================================
def iterbatches(self, shuffle=True, return_check=False):
""" """
batch_size = self.batch_size
batch_by = self.batch_by
batches = []
for bkt_idx, bucket in enumerate(self.multibucket):
if batch_size == 0:
n_splits = 1
else:
if batch_by == 'tokens':
n_tokens = bucket.indices.shape[0] * bucket.indices.shape[1]
n_splits = max(n_tokens // batch_size, 1)
elif batch_by == 'seqs':
n_seqs = bucket.indices.shape[0]
          n_splits = max(n_seqs // batch_size, 1)
        else:
          n_splits = 1 # fallback so n_splits is always defined for other batch_by values
if shuffle:
range_func = np.random.permutation
else:
range_func = np.arange
splits = np.array_split(range_func(bucket.indices.shape[0])[1:], n_splits)
for split in splits:
batches.append( (bkt_idx, split) )
if shuffle:
np.random.shuffle(batches)
for bkt_idx, batch in batches:
feed_dict = {}
tokens = []
for multibucket, vocab in self.iteritems():
bucket = multibucket[bkt_idx]
indices = bucket.indices[batch]
vocab.set_feed_dict(indices, feed_dict)
if return_check:
#print("INDICES",indices.shape,indices)
if len(indices.shape) == 2:
tokens.append(vocab[indices])
elif len(indices.shape) == 3:
for i,subvocab in enumerate(vocab):
tokens.append(subvocab[indices[:,:,i]])
#print("SUBVOCAB",subvocab)
#tokens.extend([subvocab[indices[:,:,i]] for i, subvocab in enumerate(vocab)])
# TODO This is super hacky
if hasattr(subvocab, 'idx2tok'):
tokens[-1] = [[subvocab.idx2tok.get(idx, subvocab[subvocab.PAD]) for idx in idxs] for idxs in indices[:,:,-1]]
elif not shuffle:
tokens.append(bucket.get_tokens(batch))
if not shuffle or return_check:
yield feed_dict, list(zip(*tokens))
else:
yield feed_dict
#=============================================================
def iteritems(self):
for i in range(len(self)):
yield (self[i], self._vocabs[i])
#=============================================================
def update_history(self, history, accumulators):
return self._nlp_model.update_history(history, accumulators)
def print_accuracy(self, accumulators, time):
return self._nlp_model.print_accuracy(accumulators, time, prefix=self.PREFIX.title())
def write_probs(self, sents, output_file, probs, metadata):
return self._nlp_model.write_probs(sents, output_file, probs, self.multibucket.inv_idxs(), metadata)
def check(self, preds, sents, fileobj):
return self._nlp_model.check(preds, sents, fileobj)
def plot(self, history):
return self._nlp_model.plot(history)
#=============================================================
@property
def data_files(self):
return getattr(self, '{0}_files'.format(self.PREFIX.lower()))
@property
def multibucket(self):
return self._multibucket
@property
def vocabs(self):
return self._vocabs
@property
def train_keys(self):
return self._nlp_model.train_keys
@property
def valid_keys(self):
return self._nlp_model.valid_keys
@property
def parse_keys(self):
return self._nlp_model.parse_keys
#=============================================================
def __len__(self):
return len(self._multibuckets)
def __iter__(self):
return (multibucket for multibucket in self._multibuckets)
def __getitem__(self, key):
return self._multibuckets[key]
#***************************************************************
class Trainset(Dataset):
PREFIX = 'train'
class Parseset(Dataset):
PREFIX = 'parse'
#***************************************************************
if __name__ == '__main__':
""" """
from nparser.vocabs import *
from nparser.dataset import Trainset
configurable = Configurable()
dep_vocab = DepVocab.from_configurable(configurable)
word_vocab = WordVocab.from_configurable(configurable)
lemma_vocab = LemmaVocab.from_configurable(configurable)
pretrained_vocab = PretrainedVocab.from_vocab(word_vocab)
char_vocab = NgramMultivocab.from_vocab(word_vocab)
word_multivocab = Multivocab.from_configurable(configurable, [word_vocab, pretrained_vocab, char_vocab], name='words')
tag_vocab = TagVocab.from_configurable(configurable)
xtag_vocab = XTagVocab.from_configurable(configurable)
head_vocab = HeadVocab.from_configurable(configurable)
rel_vocab = RelVocab.from_configurable(configurable)
trainset = Trainset.from_configurable(configurable, [dep_vocab, word_multivocab, lemma_vocab, tag_vocab, xtag_vocab, head_vocab, rel_vocab])
trainset()
print('Dataset passes',file=sys.stderr)
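
#***************************************************************
# Hedged sketch (added; not part of the original module): the sentence/metadata
# splitting inside Dataset.iterfiles(), reduced to a standalone generator so the
# buffering logic can be tried on a small in-memory CoNLL-U fragment.
# `_iter_conllu_demo` is a new illustrative name; io, re and sys are already
# imported at the top of this file.
def _iter_conllu_demo(f):
  ID, FORM, FEATS, MISC = 0, 1, 5, 9
  buff = []
  metadata = {"comments":[],"miscfield":[],"feats":[],"multiwordtokens":[]}
  for line in f:
    line = line.strip()
    if line:
      if not line.startswith('#'):
        if not re.match('^[0-9]+[-.][0-9]+\t', line):
          cols = line.split("\t")
          metadata["miscfield"].append(cols[MISC])
          metadata["feats"].append(cols[FEATS])
          buff.append(cols)
        elif re.match('^[0-9]+[-][0-9]+\t', line): # multiword token row
          cols = line.split("\t")
          beg, end = cols[ID].split("-")
          metadata["multiwordtokens"].append((int(beg), int(end), cols[FORM]))
      else:
        metadata["comments"].append(line)
    elif buff:
      yield buff, metadata
      buff = []
      metadata = {"comments":[],"miscfield":[],"feats":[],"multiwordtokens":[]}
  if buff: # trailing sentence when the file has no final blank line
    yield buff, metadata

if __name__ == '__main__':
  demo = io.StringIO("# sent_id = 1\n"
                     "1\tHello\thello\tINTJ\t_\t_\t0\troot\t_\t_\n"
                     "2\tworld\tworld\tNOUN\t_\t_\t1\tdiscourse\t_\t_\n\n")
  for sent, meta in _iter_conllu_demo(demo):
    print(len(sent), meta["comments"], file=sys.stderr) # -> 2 ['# sent_id = 1']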
|
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2016 Timothy Dozat\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\n\nimport sys\nimport io\nimport re\nimport codecs\nfrom collections import Counter\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom nparser import Configurable, Multibucket\nfrom nparser.vocabs.base_vocab import BaseVocab\nfrom nparser.misc.bucketer import Bucketer\n\n__all__ = ['Trainset', 'Parseset']\n\n#***************************************************************\nclass Dataset(Configurable):\n \"\"\" \"\"\"\n \n #=============================================================\n def __init__(self, vocabs, *args, **kwargs):\n \"\"\" \"\"\"\n \n nlp_model = kwargs.pop('nlp_model', None)\n if \"parse_files\" in kwargs and isinstance(kwargs[\"parse_files\"],io.StringIO): ### SPECIAL CASE - PARSING StringIO\n self.preopen_parse_file=kwargs.pop(\"parse_files\") #This doesn't really play well with the configparser thing\n else:\n self.preopen_parse_file=None\n super(Dataset, self).__init__(*args, **kwargs)\n \n self._vocabs = vocabs\n self._multibuckets = [Multibucket.from_configurable(vocab, name='%s-%s'%(self.name, vocab.name)) for vocab in self.vocabs]\n self._metadata=[]\n \n if nlp_model is not None:\n self._nlp_model = nlp_model.from_configurable(self, name=self.name)\n else:\n self._nlp_model = None\n \n with Bucketer.from_configurable(self, self.n_buckets, name='bucketer-%s'%self.name) as bucketer:\n splits = bucketer.compute_splits(len(sent) for sent,metadata in self.iterfiles())\n \n for i in range(len(splits)):\n splits[i] += 1\n for multibucket, vocab in self.iteritems():\n multibucket.open(splits, depth=vocab.depth)\n for sent,metadata in self.iterfiles():\n \n #mycode begins\n #words = [line[1] for line in sent]\n #uposList = [line[3] for line in sent]\n #xposList = [line[4].split('|',1)[0][5:] for line in sent]\n #morpList = [line[5] for line in sent]\n #newUposList = rule_based_parser(words,uposList,xposList,morpList)\n #for i,line in enumerate(sent):\n # line[3] = newUposList[i]\n #print(sent)\n #mycode ends\n \n self._metadata.append(metadata)\n for multibucket, vocab in self.iteritems():\n tokens = [line[vocab.conll_idx] for line in sent]\n \n idxs = [vocab.ROOT] + [vocab.index(token) for token in tokens]\n\n multibucket.add(idxs, tokens)\n for multibucket in self:\n multibucket.close()\n self._multibucket = Multibucket.from_dataset(self)\n \n return\n \n #=============================================================\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n \n return self._nlp_model(self.vocabs, moving_params=moving_params)\n \n #=============================================================\n def iterfiles(self):\n \"\"\" \"\"\"\n #0 1 2 3 4 5 6 7 8 9\n ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC=range(10)\n if isinstance(self.preopen_parse_file,io.StringIO): #Go from here\n data_files=[self.preopen_parse_file]\n else:\n data_files=self.data_files\n for data_file in data_files:\n if 
isinstance(data_file,str):\n f=codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f=data_file\n \n buff = []\n metadata = {\"comments\":[],\"miscfield\":[],\"feats\":[],\"multiwordtokens\":[]}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols=line.split(\"\\t\")\n metadata[\"miscfield\"].append(cols[MISC])\n metadata[\"feats\"].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line): #multiword token\n cols=line.split(\"\\t\")\n beg,end=cols[ID].split(\"-\")\n metadata[\"multiwordtokens\"].append((int(beg),int(end),cols[FORM]))\n else:\n metadata[\"comments\"].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {\"comments\":[],\"miscfield\":[],\"feats\":[],\"multiwordtokens\":[]}\n yield buff, metadata\n\n if isinstance(data_file,str):\n f.close()\n else:\n f.seek(0) #rewind for new reading\n \n #=============================================================\n def iterbatches(self, shuffle=True, return_check=False):\n \"\"\" \"\"\"\n \n batch_size = self.batch_size\n batch_by = self.batch_by \n batches = []\n for bkt_idx, bucket in enumerate(self.multibucket):\n if batch_size == 0:\n n_splits = 1\n else:\n if batch_by == 'tokens':\n n_tokens = bucket.indices.shape[0] * bucket.indices.shape[1]\n n_splits = max(n_tokens // batch_size, 1)\n elif batch_by == 'seqs':\n n_seqs = bucket.indices.shape[0]\n n_splits = max(n_seqs // batch_size, 1)\n if shuffle:\n range_func = np.random.permutation\n else:\n range_func = np.arange\n splits = np.array_split(range_func(bucket.indices.shape[0])[1:], n_splits)\n for split in splits:\n batches.append( (bkt_idx, split) )\n if shuffle:\n np.random.shuffle(batches)\n\n for bkt_idx, batch in batches:\n feed_dict = {}\n tokens = []\n for multibucket, vocab in self.iteritems():\n bucket = multibucket[bkt_idx]\n indices = bucket.indices[batch]\n vocab.set_feed_dict(indices, feed_dict)\n if return_check:\n #print(\"INDICES\",indices.shape,indices)\n if len(indices.shape) == 2:\n tokens.append(vocab[indices])\n elif len(indices.shape) == 3:\n for i,subvocab in enumerate(vocab):\n tokens.append(subvocab[indices[:,:,i]])\n #print(\"SUBVOCAB\",subvocab)\n #tokens.extend([subvocab[indices[:,:,i]] for i, subvocab in enumerate(vocab)])\n # TODO This is super hacky\n if hasattr(subvocab, 'idx2tok'):\n tokens[-1] = [[subvocab.idx2tok.get(idx, subvocab[subvocab.PAD]) for idx in idxs] for idxs in indices[:,:,-1]]\n elif not shuffle:\n tokens.append(bucket.get_tokens(batch))\n\n if not shuffle or return_check:\n yield feed_dict, list(zip(*tokens))\n else:\n yield feed_dict\n \n #=============================================================\n def iteritems(self):\n for i in range(len(self)):\n yield (self[i], self._vocabs[i])\n \n #=============================================================\n def update_history(self, history, accumulators):\n return self._nlp_model.update_history(history, accumulators)\n \n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=self.PREFIX.title())\n \n def write_probs(self, sents, output_file, probs, metadata):\n return self._nlp_model.write_probs(sents, output_file, probs, self.multibucket.inv_idxs(), metadata)\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n \n def plot(self, history):\n return self._nlp_model.plot(history)\n \n 
#=============================================================\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n @property\n def multibucket(self):\n return self._multibucket\n @property\n def vocabs(self):\n return self._vocabs\n @property\n def train_keys(self):\n return self._nlp_model.train_keys\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n \n #=============================================================\n def __len__(self):\n return len(self._multibuckets)\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n#***************************************************************\nclass Trainset(Dataset):\n PREFIX = 'train'\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n#***************************************************************\nif __name__ == '__main__':\n \"\"\" \"\"\"\n \n from nparser.vocabs import *\n from nparser.dataset import Trainset\n \n configurable = Configurable()\n dep_vocab = DepVocab.from_configurable(configurable)\n word_vocab = WordVocab.from_configurable(configurable)\n lemma_vocab = LemmaVocab.from_configurable(configurable)\n pretrained_vocab = PretrainedVocab.from_vocab(word_vocab)\n char_vocab = NgramMultivocab.from_vocab(word_vocab)\n word_multivocab = Multivocab.from_configurable(configurable, [word_vocab, pretrained_vocab, char_vocab], name='words')\n tag_vocab = TagVocab.from_configurable(configurable)\n xtag_vocab = XTagVocab.from_configurable(configurable)\n head_vocab = HeadVocab.from_configurable(configurable)\n rel_vocab = RelVocab.from_configurable(configurable)\n trainset = Trainset.from_configurable(configurable, [dep_vocab, word_multivocab, lemma_vocab, tag_vocab, xtag_vocab, head_vocab, rel_vocab])\n trainset()\n print('Dataset passes',file=sys.stderr)\n \n",
"import sys\nimport io\nimport re\nimport codecs\nfrom collections import Counter\nimport numpy as np\nimport tensorflow as tf\nfrom nparser import Configurable, Multibucket\nfrom nparser.vocabs.base_vocab import BaseVocab\nfrom nparser.misc.bucketer import Bucketer\n__all__ = ['Trainset', 'Parseset']\n\n\nclass Dataset(Configurable):\n \"\"\" \"\"\"\n\n def __init__(self, vocabs, *args, **kwargs):\n \"\"\" \"\"\"\n nlp_model = kwargs.pop('nlp_model', None)\n if 'parse_files' in kwargs and isinstance(kwargs['parse_files'], io\n .StringIO):\n self.preopen_parse_file = kwargs.pop('parse_files')\n else:\n self.preopen_parse_file = None\n super(Dataset, self).__init__(*args, **kwargs)\n self._vocabs = vocabs\n self._multibuckets = [Multibucket.from_configurable(vocab, name=\n '%s-%s' % (self.name, vocab.name)) for vocab in self.vocabs]\n self._metadata = []\n if nlp_model is not None:\n self._nlp_model = nlp_model.from_configurable(self, name=self.name)\n else:\n self._nlp_model = None\n with Bucketer.from_configurable(self, self.n_buckets, name=\n 'bucketer-%s' % self.name) as bucketer:\n splits = bucketer.compute_splits(len(sent) for sent, metadata in\n self.iterfiles())\n for i in range(len(splits)):\n splits[i] += 1\n for multibucket, vocab in self.iteritems():\n multibucket.open(splits, depth=vocab.depth)\n for sent, metadata in self.iterfiles():\n self._metadata.append(metadata)\n for multibucket, vocab in self.iteritems():\n tokens = [line[vocab.conll_idx] for line in sent]\n idxs = [vocab.ROOT] + [vocab.index(token) for token in tokens]\n multibucket.add(idxs, tokens)\n for multibucket in self:\n multibucket.close()\n self._multibucket = Multibucket.from_dataset(self)\n return\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n\n def iterbatches(self, shuffle=True, return_check=False):\n \"\"\" \"\"\"\n batch_size = self.batch_size\n batch_by = self.batch_by\n batches = []\n for bkt_idx, bucket in enumerate(self.multibucket):\n if batch_size == 0:\n n_splits = 1\n elif batch_by == 'tokens':\n n_tokens = bucket.indices.shape[0] * bucket.indices.shape[1]\n n_splits = max(n_tokens // batch_size, 1)\n elif batch_by == 'seqs':\n n_seqs = bucket.indices.shape[0]\n n_splits = max(n_seqs // batch_size, 1)\n if shuffle:\n range_func = 
np.random.permutation\n else:\n range_func = np.arange\n splits = np.array_split(range_func(bucket.indices.shape[0])[1:],\n n_splits)\n for split in splits:\n batches.append((bkt_idx, split))\n if shuffle:\n np.random.shuffle(batches)\n for bkt_idx, batch in batches:\n feed_dict = {}\n tokens = []\n for multibucket, vocab in self.iteritems():\n bucket = multibucket[bkt_idx]\n indices = bucket.indices[batch]\n vocab.set_feed_dict(indices, feed_dict)\n if return_check:\n if len(indices.shape) == 2:\n tokens.append(vocab[indices])\n elif len(indices.shape) == 3:\n for i, subvocab in enumerate(vocab):\n tokens.append(subvocab[indices[:, :, i]])\n if hasattr(subvocab, 'idx2tok'):\n tokens[-1] = [[subvocab.idx2tok.get(idx,\n subvocab[subvocab.PAD]) for idx in idxs] for\n idxs in indices[:, :, -1]]\n elif not shuffle:\n tokens.append(bucket.get_tokens(batch))\n if not shuffle or return_check:\n yield feed_dict, list(zip(*tokens))\n else:\n yield feed_dict\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n\n def update_history(self, history, accumulators):\n return self._nlp_model.update_history(history, accumulators)\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n\n def write_probs(self, sents, output_file, probs, metadata):\n return self._nlp_model.write_probs(sents, output_file, probs, self.\n multibucket.inv_idxs(), metadata)\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n\n def plot(self, history):\n return self._nlp_model.plot(history)\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n\n @property\n def vocabs(self):\n return self._vocabs\n\n @property\n def train_keys(self):\n return self._nlp_model.train_keys\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n\n def __len__(self):\n return len(self._multibuckets)\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\nif __name__ == '__main__':\n \"\"\" \"\"\"\n from nparser.vocabs import *\n from nparser.dataset import Trainset\n configurable = Configurable()\n dep_vocab = DepVocab.from_configurable(configurable)\n word_vocab = WordVocab.from_configurable(configurable)\n lemma_vocab = LemmaVocab.from_configurable(configurable)\n pretrained_vocab = PretrainedVocab.from_vocab(word_vocab)\n char_vocab = NgramMultivocab.from_vocab(word_vocab)\n word_multivocab = Multivocab.from_configurable(configurable, [\n word_vocab, pretrained_vocab, char_vocab], name='words')\n tag_vocab = TagVocab.from_configurable(configurable)\n xtag_vocab = XTagVocab.from_configurable(configurable)\n head_vocab = HeadVocab.from_configurable(configurable)\n rel_vocab = RelVocab.from_configurable(configurable)\n trainset = Trainset.from_configurable(configurable, [dep_vocab,\n word_multivocab, lemma_vocab, tag_vocab, xtag_vocab, head_vocab,\n rel_vocab])\n trainset()\n print('Dataset passes', file=sys.stderr)\n",
"<import token>\n__all__ = ['Trainset', 'Parseset']\n\n\nclass Dataset(Configurable):\n \"\"\" \"\"\"\n\n def __init__(self, vocabs, *args, **kwargs):\n \"\"\" \"\"\"\n nlp_model = kwargs.pop('nlp_model', None)\n if 'parse_files' in kwargs and isinstance(kwargs['parse_files'], io\n .StringIO):\n self.preopen_parse_file = kwargs.pop('parse_files')\n else:\n self.preopen_parse_file = None\n super(Dataset, self).__init__(*args, **kwargs)\n self._vocabs = vocabs\n self._multibuckets = [Multibucket.from_configurable(vocab, name=\n '%s-%s' % (self.name, vocab.name)) for vocab in self.vocabs]\n self._metadata = []\n if nlp_model is not None:\n self._nlp_model = nlp_model.from_configurable(self, name=self.name)\n else:\n self._nlp_model = None\n with Bucketer.from_configurable(self, self.n_buckets, name=\n 'bucketer-%s' % self.name) as bucketer:\n splits = bucketer.compute_splits(len(sent) for sent, metadata in\n self.iterfiles())\n for i in range(len(splits)):\n splits[i] += 1\n for multibucket, vocab in self.iteritems():\n multibucket.open(splits, depth=vocab.depth)\n for sent, metadata in self.iterfiles():\n self._metadata.append(metadata)\n for multibucket, vocab in self.iteritems():\n tokens = [line[vocab.conll_idx] for line in sent]\n idxs = [vocab.ROOT] + [vocab.index(token) for token in tokens]\n multibucket.add(idxs, tokens)\n for multibucket in self:\n multibucket.close()\n self._multibucket = Multibucket.from_dataset(self)\n return\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n\n def iterbatches(self, shuffle=True, return_check=False):\n \"\"\" \"\"\"\n batch_size = self.batch_size\n batch_by = self.batch_by\n batches = []\n for bkt_idx, bucket in enumerate(self.multibucket):\n if batch_size == 0:\n n_splits = 1\n elif batch_by == 'tokens':\n n_tokens = bucket.indices.shape[0] * bucket.indices.shape[1]\n n_splits = max(n_tokens // batch_size, 1)\n elif batch_by == 'seqs':\n n_seqs = bucket.indices.shape[0]\n n_splits = max(n_seqs // batch_size, 1)\n if shuffle:\n range_func = np.random.permutation\n else:\n range_func = np.arange\n splits = np.array_split(range_func(bucket.indices.shape[0])[1:],\n n_splits)\n for split in splits:\n batches.append((bkt_idx, split))\n if shuffle:\n np.random.shuffle(batches)\n for bkt_idx, 
batch in batches:\n feed_dict = {}\n tokens = []\n for multibucket, vocab in self.iteritems():\n bucket = multibucket[bkt_idx]\n indices = bucket.indices[batch]\n vocab.set_feed_dict(indices, feed_dict)\n if return_check:\n if len(indices.shape) == 2:\n tokens.append(vocab[indices])\n elif len(indices.shape) == 3:\n for i, subvocab in enumerate(vocab):\n tokens.append(subvocab[indices[:, :, i]])\n if hasattr(subvocab, 'idx2tok'):\n tokens[-1] = [[subvocab.idx2tok.get(idx,\n subvocab[subvocab.PAD]) for idx in idxs] for\n idxs in indices[:, :, -1]]\n elif not shuffle:\n tokens.append(bucket.get_tokens(batch))\n if not shuffle or return_check:\n yield feed_dict, list(zip(*tokens))\n else:\n yield feed_dict\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n\n def update_history(self, history, accumulators):\n return self._nlp_model.update_history(history, accumulators)\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n\n def write_probs(self, sents, output_file, probs, metadata):\n return self._nlp_model.write_probs(sents, output_file, probs, self.\n multibucket.inv_idxs(), metadata)\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n\n def plot(self, history):\n return self._nlp_model.plot(history)\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n\n @property\n def vocabs(self):\n return self._vocabs\n\n @property\n def train_keys(self):\n return self._nlp_model.train_keys\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n\n def __len__(self):\n return len(self._multibuckets)\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\nif __name__ == '__main__':\n \"\"\" \"\"\"\n from nparser.vocabs import *\n from nparser.dataset import Trainset\n configurable = Configurable()\n dep_vocab = DepVocab.from_configurable(configurable)\n word_vocab = WordVocab.from_configurable(configurable)\n lemma_vocab = LemmaVocab.from_configurable(configurable)\n pretrained_vocab = PretrainedVocab.from_vocab(word_vocab)\n char_vocab = NgramMultivocab.from_vocab(word_vocab)\n word_multivocab = Multivocab.from_configurable(configurable, [\n word_vocab, pretrained_vocab, char_vocab], name='words')\n tag_vocab = TagVocab.from_configurable(configurable)\n xtag_vocab = XTagVocab.from_configurable(configurable)\n head_vocab = HeadVocab.from_configurable(configurable)\n rel_vocab = RelVocab.from_configurable(configurable)\n trainset = Trainset.from_configurable(configurable, [dep_vocab,\n word_multivocab, lemma_vocab, tag_vocab, xtag_vocab, head_vocab,\n rel_vocab])\n trainset()\n print('Dataset passes', file=sys.stderr)\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n \"\"\" \"\"\"\n\n def __init__(self, vocabs, *args, **kwargs):\n \"\"\" \"\"\"\n nlp_model = kwargs.pop('nlp_model', None)\n if 'parse_files' in kwargs and isinstance(kwargs['parse_files'], io\n .StringIO):\n self.preopen_parse_file = kwargs.pop('parse_files')\n else:\n self.preopen_parse_file = None\n super(Dataset, self).__init__(*args, **kwargs)\n self._vocabs = vocabs\n self._multibuckets = [Multibucket.from_configurable(vocab, name=\n '%s-%s' % (self.name, vocab.name)) for vocab in self.vocabs]\n self._metadata = []\n if nlp_model is not None:\n self._nlp_model = nlp_model.from_configurable(self, name=self.name)\n else:\n self._nlp_model = None\n with Bucketer.from_configurable(self, self.n_buckets, name=\n 'bucketer-%s' % self.name) as bucketer:\n splits = bucketer.compute_splits(len(sent) for sent, metadata in\n self.iterfiles())\n for i in range(len(splits)):\n splits[i] += 1\n for multibucket, vocab in self.iteritems():\n multibucket.open(splits, depth=vocab.depth)\n for sent, metadata in self.iterfiles():\n self._metadata.append(metadata)\n for multibucket, vocab in self.iteritems():\n tokens = [line[vocab.conll_idx] for line in sent]\n idxs = [vocab.ROOT] + [vocab.index(token) for token in tokens]\n multibucket.add(idxs, tokens)\n for multibucket in self:\n multibucket.close()\n self._multibucket = Multibucket.from_dataset(self)\n return\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n\n def iterbatches(self, shuffle=True, return_check=False):\n \"\"\" \"\"\"\n batch_size = self.batch_size\n batch_by = self.batch_by\n batches = []\n for bkt_idx, bucket in enumerate(self.multibucket):\n if batch_size == 0:\n n_splits = 1\n elif batch_by == 'tokens':\n n_tokens = bucket.indices.shape[0] * bucket.indices.shape[1]\n n_splits = max(n_tokens // batch_size, 1)\n elif batch_by == 'seqs':\n n_seqs = bucket.indices.shape[0]\n n_splits = max(n_seqs // batch_size, 1)\n if shuffle:\n range_func = np.random.permutation\n else:\n range_func = np.arange\n splits = np.array_split(range_func(bucket.indices.shape[0])[1:],\n n_splits)\n for split in splits:\n batches.append((bkt_idx, split))\n if shuffle:\n np.random.shuffle(batches)\n for bkt_idx, batch in 
batches:\n feed_dict = {}\n tokens = []\n for multibucket, vocab in self.iteritems():\n bucket = multibucket[bkt_idx]\n indices = bucket.indices[batch]\n vocab.set_feed_dict(indices, feed_dict)\n if return_check:\n if len(indices.shape) == 2:\n tokens.append(vocab[indices])\n elif len(indices.shape) == 3:\n for i, subvocab in enumerate(vocab):\n tokens.append(subvocab[indices[:, :, i]])\n if hasattr(subvocab, 'idx2tok'):\n tokens[-1] = [[subvocab.idx2tok.get(idx,\n subvocab[subvocab.PAD]) for idx in idxs] for\n idxs in indices[:, :, -1]]\n elif not shuffle:\n tokens.append(bucket.get_tokens(batch))\n if not shuffle or return_check:\n yield feed_dict, list(zip(*tokens))\n else:\n yield feed_dict\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n\n def update_history(self, history, accumulators):\n return self._nlp_model.update_history(history, accumulators)\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n\n def write_probs(self, sents, output_file, probs, metadata):\n return self._nlp_model.write_probs(sents, output_file, probs, self.\n multibucket.inv_idxs(), metadata)\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n\n def plot(self, history):\n return self._nlp_model.plot(history)\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n\n @property\n def vocabs(self):\n return self._vocabs\n\n @property\n def train_keys(self):\n return self._nlp_model.train_keys\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n\n def __len__(self):\n return len(self._multibuckets)\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\nif __name__ == '__main__':\n \"\"\" \"\"\"\n from nparser.vocabs import *\n from nparser.dataset import Trainset\n configurable = Configurable()\n dep_vocab = DepVocab.from_configurable(configurable)\n word_vocab = WordVocab.from_configurable(configurable)\n lemma_vocab = LemmaVocab.from_configurable(configurable)\n pretrained_vocab = PretrainedVocab.from_vocab(word_vocab)\n char_vocab = NgramMultivocab.from_vocab(word_vocab)\n word_multivocab = Multivocab.from_configurable(configurable, [\n word_vocab, pretrained_vocab, char_vocab], name='words')\n tag_vocab = TagVocab.from_configurable(configurable)\n xtag_vocab = XTagVocab.from_configurable(configurable)\n head_vocab = HeadVocab.from_configurable(configurable)\n rel_vocab = RelVocab.from_configurable(configurable)\n trainset = Trainset.from_configurable(configurable, [dep_vocab,\n word_multivocab, lemma_vocab, tag_vocab, xtag_vocab, head_vocab,\n rel_vocab])\n trainset()\n print('Dataset passes', file=sys.stderr)\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n \"\"\" \"\"\"\n\n def __init__(self, vocabs, *args, **kwargs):\n \"\"\" \"\"\"\n nlp_model = kwargs.pop('nlp_model', None)\n if 'parse_files' in kwargs and isinstance(kwargs['parse_files'], io\n .StringIO):\n self.preopen_parse_file = kwargs.pop('parse_files')\n else:\n self.preopen_parse_file = None\n super(Dataset, self).__init__(*args, **kwargs)\n self._vocabs = vocabs\n self._multibuckets = [Multibucket.from_configurable(vocab, name=\n '%s-%s' % (self.name, vocab.name)) for vocab in self.vocabs]\n self._metadata = []\n if nlp_model is not None:\n self._nlp_model = nlp_model.from_configurable(self, name=self.name)\n else:\n self._nlp_model = None\n with Bucketer.from_configurable(self, self.n_buckets, name=\n 'bucketer-%s' % self.name) as bucketer:\n splits = bucketer.compute_splits(len(sent) for sent, metadata in\n self.iterfiles())\n for i in range(len(splits)):\n splits[i] += 1\n for multibucket, vocab in self.iteritems():\n multibucket.open(splits, depth=vocab.depth)\n for sent, metadata in self.iterfiles():\n self._metadata.append(metadata)\n for multibucket, vocab in self.iteritems():\n tokens = [line[vocab.conll_idx] for line in sent]\n idxs = [vocab.ROOT] + [vocab.index(token) for token in tokens]\n multibucket.add(idxs, tokens)\n for multibucket in self:\n multibucket.close()\n self._multibucket = Multibucket.from_dataset(self)\n return\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n\n def iterbatches(self, shuffle=True, return_check=False):\n \"\"\" \"\"\"\n batch_size = self.batch_size\n batch_by = self.batch_by\n batches = []\n for bkt_idx, bucket in enumerate(self.multibucket):\n if batch_size == 0:\n n_splits = 1\n elif batch_by == 'tokens':\n n_tokens = bucket.indices.shape[0] * bucket.indices.shape[1]\n n_splits = max(n_tokens // batch_size, 1)\n elif batch_by == 'seqs':\n n_seqs = bucket.indices.shape[0]\n n_splits = max(n_seqs // batch_size, 1)\n if shuffle:\n range_func = np.random.permutation\n else:\n range_func = np.arange\n splits = np.array_split(range_func(bucket.indices.shape[0])[1:],\n n_splits)\n for split in splits:\n batches.append((bkt_idx, split))\n if shuffle:\n np.random.shuffle(batches)\n for bkt_idx, batch in 
batches:\n feed_dict = {}\n tokens = []\n for multibucket, vocab in self.iteritems():\n bucket = multibucket[bkt_idx]\n indices = bucket.indices[batch]\n vocab.set_feed_dict(indices, feed_dict)\n if return_check:\n if len(indices.shape) == 2:\n tokens.append(vocab[indices])\n elif len(indices.shape) == 3:\n for i, subvocab in enumerate(vocab):\n tokens.append(subvocab[indices[:, :, i]])\n if hasattr(subvocab, 'idx2tok'):\n tokens[-1] = [[subvocab.idx2tok.get(idx,\n subvocab[subvocab.PAD]) for idx in idxs] for\n idxs in indices[:, :, -1]]\n elif not shuffle:\n tokens.append(bucket.get_tokens(batch))\n if not shuffle or return_check:\n yield feed_dict, list(zip(*tokens))\n else:\n yield feed_dict\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n\n def update_history(self, history, accumulators):\n return self._nlp_model.update_history(history, accumulators)\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n\n def write_probs(self, sents, output_file, probs, metadata):\n return self._nlp_model.write_probs(sents, output_file, probs, self.\n multibucket.inv_idxs(), metadata)\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n\n def plot(self, history):\n return self._nlp_model.plot(history)\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n\n @property\n def vocabs(self):\n return self._vocabs\n\n @property\n def train_keys(self):\n return self._nlp_model.train_keys\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n\n def __len__(self):\n return len(self._multibuckets)\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n\n def __init__(self, vocabs, *args, **kwargs):\n \"\"\" \"\"\"\n nlp_model = kwargs.pop('nlp_model', None)\n if 'parse_files' in kwargs and isinstance(kwargs['parse_files'], io\n .StringIO):\n self.preopen_parse_file = kwargs.pop('parse_files')\n else:\n self.preopen_parse_file = None\n super(Dataset, self).__init__(*args, **kwargs)\n self._vocabs = vocabs\n self._multibuckets = [Multibucket.from_configurable(vocab, name=\n '%s-%s' % (self.name, vocab.name)) for vocab in self.vocabs]\n self._metadata = []\n if nlp_model is not None:\n self._nlp_model = nlp_model.from_configurable(self, name=self.name)\n else:\n self._nlp_model = None\n with Bucketer.from_configurable(self, self.n_buckets, name=\n 'bucketer-%s' % self.name) as bucketer:\n splits = bucketer.compute_splits(len(sent) for sent, metadata in\n self.iterfiles())\n for i in range(len(splits)):\n splits[i] += 1\n for multibucket, vocab in self.iteritems():\n multibucket.open(splits, depth=vocab.depth)\n for sent, metadata in self.iterfiles():\n self._metadata.append(metadata)\n for multibucket, vocab in self.iteritems():\n tokens = [line[vocab.conll_idx] for line in sent]\n idxs = [vocab.ROOT] + [vocab.index(token) for token in tokens]\n multibucket.add(idxs, tokens)\n for multibucket in self:\n multibucket.close()\n self._multibucket = Multibucket.from_dataset(self)\n return\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n\n def iterbatches(self, shuffle=True, return_check=False):\n \"\"\" \"\"\"\n batch_size = self.batch_size\n batch_by = self.batch_by\n batches = []\n for bkt_idx, bucket in enumerate(self.multibucket):\n if batch_size == 0:\n n_splits = 1\n elif batch_by == 'tokens':\n n_tokens = bucket.indices.shape[0] * bucket.indices.shape[1]\n n_splits = max(n_tokens // batch_size, 1)\n elif batch_by == 'seqs':\n n_seqs = bucket.indices.shape[0]\n n_splits = max(n_seqs // batch_size, 1)\n if shuffle:\n range_func = np.random.permutation\n else:\n range_func = np.arange\n splits = np.array_split(range_func(bucket.indices.shape[0])[1:],\n n_splits)\n for split in splits:\n batches.append((bkt_idx, split))\n if shuffle:\n np.random.shuffle(batches)\n for bkt_idx, batch in 
batches:\n feed_dict = {}\n tokens = []\n for multibucket, vocab in self.iteritems():\n bucket = multibucket[bkt_idx]\n indices = bucket.indices[batch]\n vocab.set_feed_dict(indices, feed_dict)\n if return_check:\n if len(indices.shape) == 2:\n tokens.append(vocab[indices])\n elif len(indices.shape) == 3:\n for i, subvocab in enumerate(vocab):\n tokens.append(subvocab[indices[:, :, i]])\n if hasattr(subvocab, 'idx2tok'):\n tokens[-1] = [[subvocab.idx2tok.get(idx,\n subvocab[subvocab.PAD]) for idx in idxs] for\n idxs in indices[:, :, -1]]\n elif not shuffle:\n tokens.append(bucket.get_tokens(batch))\n if not shuffle or return_check:\n yield feed_dict, list(zip(*tokens))\n else:\n yield feed_dict\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n\n def update_history(self, history, accumulators):\n return self._nlp_model.update_history(history, accumulators)\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n\n def write_probs(self, sents, output_file, probs, metadata):\n return self._nlp_model.write_probs(sents, output_file, probs, self.\n multibucket.inv_idxs(), metadata)\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n\n def plot(self, history):\n return self._nlp_model.plot(history)\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n\n @property\n def vocabs(self):\n return self._vocabs\n\n @property\n def train_keys(self):\n return self._nlp_model.train_keys\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n\n def __len__(self):\n return len(self._multibuckets)\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n\n def __init__(self, vocabs, *args, **kwargs):\n \"\"\" \"\"\"\n nlp_model = kwargs.pop('nlp_model', None)\n if 'parse_files' in kwargs and isinstance(kwargs['parse_files'], io\n .StringIO):\n self.preopen_parse_file = kwargs.pop('parse_files')\n else:\n self.preopen_parse_file = None\n super(Dataset, self).__init__(*args, **kwargs)\n self._vocabs = vocabs\n self._multibuckets = [Multibucket.from_configurable(vocab, name=\n '%s-%s' % (self.name, vocab.name)) for vocab in self.vocabs]\n self._metadata = []\n if nlp_model is not None:\n self._nlp_model = nlp_model.from_configurable(self, name=self.name)\n else:\n self._nlp_model = None\n with Bucketer.from_configurable(self, self.n_buckets, name=\n 'bucketer-%s' % self.name) as bucketer:\n splits = bucketer.compute_splits(len(sent) for sent, metadata in\n self.iterfiles())\n for i in range(len(splits)):\n splits[i] += 1\n for multibucket, vocab in self.iteritems():\n multibucket.open(splits, depth=vocab.depth)\n for sent, metadata in self.iterfiles():\n self._metadata.append(metadata)\n for multibucket, vocab in self.iteritems():\n tokens = [line[vocab.conll_idx] for line in sent]\n idxs = [vocab.ROOT] + [vocab.index(token) for token in tokens]\n multibucket.add(idxs, tokens)\n for multibucket in self:\n multibucket.close()\n self._multibucket = Multibucket.from_dataset(self)\n return\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n\n def iterbatches(self, shuffle=True, return_check=False):\n \"\"\" \"\"\"\n batch_size = self.batch_size\n batch_by = self.batch_by\n batches = []\n for bkt_idx, bucket in enumerate(self.multibucket):\n if batch_size == 0:\n n_splits = 1\n elif batch_by == 'tokens':\n n_tokens = bucket.indices.shape[0] * bucket.indices.shape[1]\n n_splits = max(n_tokens // batch_size, 1)\n elif batch_by == 'seqs':\n n_seqs = bucket.indices.shape[0]\n n_splits = max(n_seqs // batch_size, 1)\n if shuffle:\n range_func = np.random.permutation\n else:\n range_func = np.arange\n splits = np.array_split(range_func(bucket.indices.shape[0])[1:],\n n_splits)\n for split in splits:\n batches.append((bkt_idx, split))\n if shuffle:\n np.random.shuffle(batches)\n for bkt_idx, batch in 
batches:\n feed_dict = {}\n tokens = []\n for multibucket, vocab in self.iteritems():\n bucket = multibucket[bkt_idx]\n indices = bucket.indices[batch]\n vocab.set_feed_dict(indices, feed_dict)\n if return_check:\n if len(indices.shape) == 2:\n tokens.append(vocab[indices])\n elif len(indices.shape) == 3:\n for i, subvocab in enumerate(vocab):\n tokens.append(subvocab[indices[:, :, i]])\n if hasattr(subvocab, 'idx2tok'):\n tokens[-1] = [[subvocab.idx2tok.get(idx,\n subvocab[subvocab.PAD]) for idx in idxs] for\n idxs in indices[:, :, -1]]\n elif not shuffle:\n tokens.append(bucket.get_tokens(batch))\n if not shuffle or return_check:\n yield feed_dict, list(zip(*tokens))\n else:\n yield feed_dict\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n\n def update_history(self, history, accumulators):\n return self._nlp_model.update_history(history, accumulators)\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n\n def write_probs(self, sents, output_file, probs, metadata):\n return self._nlp_model.write_probs(sents, output_file, probs, self.\n multibucket.inv_idxs(), metadata)\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n\n @property\n def vocabs(self):\n return self._vocabs\n\n @property\n def train_keys(self):\n return self._nlp_model.train_keys\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n\n def __len__(self):\n return len(self._multibuckets)\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n\n def __init__(self, vocabs, *args, **kwargs):\n \"\"\" \"\"\"\n nlp_model = kwargs.pop('nlp_model', None)\n if 'parse_files' in kwargs and isinstance(kwargs['parse_files'], io\n .StringIO):\n self.preopen_parse_file = kwargs.pop('parse_files')\n else:\n self.preopen_parse_file = None\n super(Dataset, self).__init__(*args, **kwargs)\n self._vocabs = vocabs\n self._multibuckets = [Multibucket.from_configurable(vocab, name=\n '%s-%s' % (self.name, vocab.name)) for vocab in self.vocabs]\n self._metadata = []\n if nlp_model is not None:\n self._nlp_model = nlp_model.from_configurable(self, name=self.name)\n else:\n self._nlp_model = None\n with Bucketer.from_configurable(self, self.n_buckets, name=\n 'bucketer-%s' % self.name) as bucketer:\n splits = bucketer.compute_splits(len(sent) for sent, metadata in\n self.iterfiles())\n for i in range(len(splits)):\n splits[i] += 1\n for multibucket, vocab in self.iteritems():\n multibucket.open(splits, depth=vocab.depth)\n for sent, metadata in self.iterfiles():\n self._metadata.append(metadata)\n for multibucket, vocab in self.iteritems():\n tokens = [line[vocab.conll_idx] for line in sent]\n idxs = [vocab.ROOT] + [vocab.index(token) for token in tokens]\n multibucket.add(idxs, tokens)\n for multibucket in self:\n multibucket.close()\n self._multibucket = Multibucket.from_dataset(self)\n return\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n\n def iterbatches(self, shuffle=True, return_check=False):\n \"\"\" \"\"\"\n batch_size = self.batch_size\n batch_by = self.batch_by\n batches = []\n for bkt_idx, bucket in enumerate(self.multibucket):\n if batch_size == 0:\n n_splits = 1\n elif batch_by == 'tokens':\n n_tokens = bucket.indices.shape[0] * bucket.indices.shape[1]\n n_splits = max(n_tokens // batch_size, 1)\n elif batch_by == 'seqs':\n n_seqs = bucket.indices.shape[0]\n n_splits = max(n_seqs // batch_size, 1)\n if shuffle:\n range_func = np.random.permutation\n else:\n range_func = np.arange\n splits = np.array_split(range_func(bucket.indices.shape[0])[1:],\n n_splits)\n for split in splits:\n batches.append((bkt_idx, split))\n if shuffle:\n np.random.shuffle(batches)\n for bkt_idx, batch in 
batches:\n feed_dict = {}\n tokens = []\n for multibucket, vocab in self.iteritems():\n bucket = multibucket[bkt_idx]\n indices = bucket.indices[batch]\n vocab.set_feed_dict(indices, feed_dict)\n if return_check:\n if len(indices.shape) == 2:\n tokens.append(vocab[indices])\n elif len(indices.shape) == 3:\n for i, subvocab in enumerate(vocab):\n tokens.append(subvocab[indices[:, :, i]])\n if hasattr(subvocab, 'idx2tok'):\n tokens[-1] = [[subvocab.idx2tok.get(idx,\n subvocab[subvocab.PAD]) for idx in idxs] for\n idxs in indices[:, :, -1]]\n elif not shuffle:\n tokens.append(bucket.get_tokens(batch))\n if not shuffle or return_check:\n yield feed_dict, list(zip(*tokens))\n else:\n yield feed_dict\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n\n def update_history(self, history, accumulators):\n return self._nlp_model.update_history(history, accumulators)\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n <function token>\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n\n @property\n def vocabs(self):\n return self._vocabs\n\n @property\n def train_keys(self):\n return self._nlp_model.train_keys\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n\n def __len__(self):\n return len(self._multibuckets)\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n\n def __init__(self, vocabs, *args, **kwargs):\n \"\"\" \"\"\"\n nlp_model = kwargs.pop('nlp_model', None)\n if 'parse_files' in kwargs and isinstance(kwargs['parse_files'], io\n .StringIO):\n self.preopen_parse_file = kwargs.pop('parse_files')\n else:\n self.preopen_parse_file = None\n super(Dataset, self).__init__(*args, **kwargs)\n self._vocabs = vocabs\n self._multibuckets = [Multibucket.from_configurable(vocab, name=\n '%s-%s' % (self.name, vocab.name)) for vocab in self.vocabs]\n self._metadata = []\n if nlp_model is not None:\n self._nlp_model = nlp_model.from_configurable(self, name=self.name)\n else:\n self._nlp_model = None\n with Bucketer.from_configurable(self, self.n_buckets, name=\n 'bucketer-%s' % self.name) as bucketer:\n splits = bucketer.compute_splits(len(sent) for sent, metadata in\n self.iterfiles())\n for i in range(len(splits)):\n splits[i] += 1\n for multibucket, vocab in self.iteritems():\n multibucket.open(splits, depth=vocab.depth)\n for sent, metadata in self.iterfiles():\n self._metadata.append(metadata)\n for multibucket, vocab in self.iteritems():\n tokens = [line[vocab.conll_idx] for line in sent]\n idxs = [vocab.ROOT] + [vocab.index(token) for token in tokens]\n multibucket.add(idxs, tokens)\n for multibucket in self:\n multibucket.close()\n self._multibucket = Multibucket.from_dataset(self)\n return\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n <function token>\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n\n def update_history(self, history, accumulators):\n return self._nlp_model.update_history(history, accumulators)\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n <function token>\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n\n @property\n def vocabs(self):\n return self._vocabs\n\n @property\n def train_keys(self):\n return 
self._nlp_model.train_keys\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n\n def __len__(self):\n return len(self._multibuckets)\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n <function token>\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n\n def update_history(self, history, accumulators):\n return self._nlp_model.update_history(history, accumulators)\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n <function token>\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n\n @property\n def vocabs(self):\n return self._vocabs\n\n @property\n def train_keys(self):\n return self._nlp_model.train_keys\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n\n def __len__(self):\n return len(self._multibuckets)\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n <function token>\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n\n def update_history(self, history, accumulators):\n return self._nlp_model.update_history(history, accumulators)\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n <function token>\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n <function token>\n\n @property\n def train_keys(self):\n return self._nlp_model.train_keys\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n\n def __len__(self):\n return len(self._multibuckets)\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n <function token>\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n <function token>\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n <function token>\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n <function token>\n\n @property\n def train_keys(self):\n return self._nlp_model.train_keys\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n\n def __len__(self):\n return len(self._multibuckets)\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n <function token>\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n <function token>\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n <function token>\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n <function token>\n <function token>\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n\n def __len__(self):\n return len(self._multibuckets)\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n <function token>\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n <function token>\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n <function token>\n\n def check(self, preds, sents, fileobj):\n return self._nlp_model.check(preds, sents, fileobj)\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n <function token>\n <function token>\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n <function token>\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n\n def iterfiles(self):\n \"\"\" \"\"\"\n ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10\n )\n if isinstance(self.preopen_parse_file, io.StringIO):\n data_files = [self.preopen_parse_file]\n else:\n data_files = self.data_files\n for data_file in data_files:\n if isinstance(data_file, str):\n f = codecs.open(data_file, encoding='utf-8', errors='ignore')\n else:\n f = data_file\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [],\n 'multiwordtokens': []}\n for line in f:\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if not re.match('^[0-9]+[-.][0-9]+\\t', line):\n cols = line.split('\\t')\n metadata['miscfield'].append(cols[MISC])\n metadata['feats'].append(cols[FEATS])\n buff.append(cols)\n elif re.match('^[0-9]+[-][0-9]+\\t', line):\n cols = line.split('\\t')\n beg, end = cols[ID].split('-')\n metadata['multiwordtokens'].append((int(beg),\n int(end), cols[FORM]))\n else:\n metadata['comments'].append(line)\n elif buff:\n yield buff, metadata\n buff = []\n metadata = {'comments': [], 'miscfield': [], 'feats': [\n ], 'multiwordtokens': []}\n yield buff, metadata\n if isinstance(data_file, str):\n f.close()\n else:\n f.seek(0)\n <function token>\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n <function token>\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n <function token>\n <function token>\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n <function token>\n <function token>\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n <function token>\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n <function token>\n <function token>\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n <function token>\n\n def print_accuracy(self, accumulators, time):\n return self._nlp_model.print_accuracy(accumulators, time, prefix=\n self.PREFIX.title())\n <function token>\n <function token>\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n <function token>\n <function token>\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n <function token>\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n <function token>\n <function token>\n\n def iteritems(self):\n for i in range(len(self)):\n yield self[i], self._vocabs[i]\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n <function token>\n <function token>\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n <function token>\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n\n @property\n def multibucket(self):\n return self._multibucket\n <function token>\n <function token>\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n <function token>\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n <function token>\n <function token>\n <function token>\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n <function token>\n\n def __iter__(self):\n return (multibucket for multibucket in self._multibuckets)\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n <function token>\n <function token>\n <function token>\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n\n @property\n def parse_keys(self):\n return self._nlp_model.parse_keys\n <function token>\n <function token>\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n <function token>\n <function token>\n <function token>\n\n @property\n def valid_keys(self):\n return self._nlp_model.valid_keys\n <function token>\n <function token>\n <function token>\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n\n def __call__(self, moving_params=None):\n \"\"\" \"\"\"\n return self._nlp_model(self.vocabs, moving_params=moving_params)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __getitem__(self, key):\n return self._multibuckets[key]\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def data_files(self):\n return getattr(self, '{0}_files'.format(self.PREFIX.lower()))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Dataset(Configurable):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass Trainset(Dataset):\n PREFIX = 'train'\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass Trainset(Dataset):\n <assignment token>\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass Parseset(Dataset):\n PREFIX = 'parse'\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass Parseset(Dataset):\n <assignment token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n"
] | false |
99,592 |
9371c5399d6096394467f2718e1a3536ee5f5c0d
|
from queue import PriorityQueue
from math import sqrt
def euclidean_heuristic(start, end, M):
start_x = M.intersections[start][0]
start_y = M.intersections[start][1]
end_x = M.intersections[end][0]
end_y = M.intersections[end][1]
return sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)
def shortest_path(M, start, goal):
parent = {} # save the correct parent of a node since we need to display the path
distances = {} # save the intermediate distance for a node
parent[start] = None # the first node has no parent
distances[start] = 0
priority = PriorityQueue()
    priority.put((0, start))  # entries are (priority, node) tuples; put() has no priority argument
while not priority.empty():
        _, current_node = priority.get()  # pop the node with the minimum total_distance (f = g + h)
#iterate through the connected nodes
for neighbor in M.roads[current_node]:
            current_distance = euclidean_heuristic(current_node, neighbor, M) # calculate the straight-line distance between the two nodes
new_distance = distances[current_node] + current_distance # if we already passed through a node, we may find a better distance with another route
if neighbor not in distances or new_distance < distances[neighbor]:
goal_distance = euclidean_heuristic(neighbor, goal, M)# the distance from the new node to the goal node
total_distance = new_distance + goal_distance # f = g + h
distances[neighbor] = new_distance
parent[neighbor] = current_node
                priority.put((total_distance, neighbor))
#at the end we get the path using the parents of the nodes
current_node = goal
path = [current_node]
while current_node != start:
current_node = parent[current_node]
path.append(current_node)
return path[::-1]#we need to reverse the path since we started from the goal and went backwards
|
[
"from queue import PriorityQueue\nfrom math import sqrt\n\ndef euclidean_heuristic(start, end, M):\n start_x = M.intersections[start][0]\n start_y = M.intersections[start][1]\n end_x = M.intersections[end][0]\n end_y = M.intersections[end][1]\n \n return sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n \ndef shortest_path(M, start, goal):\n parent = {} # save the correct parent of a node since we need to display the path\n distances = {} # save the intermediate distance for a node \n parent[start] = None # the first node has no parent\n distances[start] = 0\n \n priority = PriorityQueue()\n priority.put(start, 0)\n\n while not priority.empty():\n current_node = priority.get()# get the node with the minimum total_distance(f = g + h)\n \n #iterate through the connected nodes\n for neighbor in M.roads[current_node]:\n current_distance = euclidean_heuristic(current_node, neighbor, M) # calculate the distance between the node\n new_distance = distances[current_node] + current_distance # if we already passed through a node, we may find a better distance with another route\n \n if neighbor not in distances or new_distance < distances[neighbor]:\n goal_distance = euclidean_heuristic(neighbor, goal, M)# the distance from the new node to the goal node\n total_distance = new_distance + goal_distance # f = g + h\n distances[neighbor] = new_distance\n parent[neighbor] = current_node\n priority.put(neighbor, total_distance)\n\n #at the end we get the path using the parents of the nodes\n current_node = goal\n path = [current_node]\n while current_node != start:\n current_node = parent[current_node]\n path.append(current_node)\n return path[::-1]#we need to reverse the path since we started from the goal and went backwards\n",
"from queue import PriorityQueue\nfrom math import sqrt\n\n\ndef euclidean_heuristic(start, end, M):\n start_x = M.intersections[start][0]\n start_y = M.intersections[start][1]\n end_x = M.intersections[end][0]\n end_y = M.intersections[end][1]\n return sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n\n\ndef shortest_path(M, start, goal):\n parent = {}\n distances = {}\n parent[start] = None\n distances[start] = 0\n priority = PriorityQueue()\n priority.put(start, 0)\n while not priority.empty():\n current_node = priority.get()\n for neighbor in M.roads[current_node]:\n current_distance = euclidean_heuristic(current_node, neighbor, M)\n new_distance = distances[current_node] + current_distance\n if neighbor not in distances or new_distance < distances[neighbor]:\n goal_distance = euclidean_heuristic(neighbor, goal, M)\n total_distance = new_distance + goal_distance\n distances[neighbor] = new_distance\n parent[neighbor] = current_node\n priority.put(neighbor, total_distance)\n current_node = goal\n path = [current_node]\n while current_node != start:\n current_node = parent[current_node]\n path.append(current_node)\n return path[::-1]\n",
"<import token>\n\n\ndef euclidean_heuristic(start, end, M):\n start_x = M.intersections[start][0]\n start_y = M.intersections[start][1]\n end_x = M.intersections[end][0]\n end_y = M.intersections[end][1]\n return sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n\n\ndef shortest_path(M, start, goal):\n parent = {}\n distances = {}\n parent[start] = None\n distances[start] = 0\n priority = PriorityQueue()\n priority.put(start, 0)\n while not priority.empty():\n current_node = priority.get()\n for neighbor in M.roads[current_node]:\n current_distance = euclidean_heuristic(current_node, neighbor, M)\n new_distance = distances[current_node] + current_distance\n if neighbor not in distances or new_distance < distances[neighbor]:\n goal_distance = euclidean_heuristic(neighbor, goal, M)\n total_distance = new_distance + goal_distance\n distances[neighbor] = new_distance\n parent[neighbor] = current_node\n priority.put(neighbor, total_distance)\n current_node = goal\n path = [current_node]\n while current_node != start:\n current_node = parent[current_node]\n path.append(current_node)\n return path[::-1]\n",
"<import token>\n\n\ndef euclidean_heuristic(start, end, M):\n start_x = M.intersections[start][0]\n start_y = M.intersections[start][1]\n end_x = M.intersections[end][0]\n end_y = M.intersections[end][1]\n return sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n"
] | false |
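A minimal usage sketch for the A* record above (with the (priority, node) tuple fix applied), assuming only the two attributes shortest_path actually reads: M.intersections mapping node ids to (x, y) pairs and M.roads mapping node ids to adjacency lists. The three-node map is hypothetical test data, not part of the original record.

from types import SimpleNamespace

# hypothetical straight-line map: 0 -- 1 -- 2
M = SimpleNamespace(
    intersections={0: (0.0, 0.0), 1: (1.0, 0.0), 2: (2.0, 0.0)},
    roads={0: [1], 1: [0, 2], 2: [1]},
)
print(shortest_path(M, 0, 2))  # expected: [0, 1, 2]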
99,593 |
b523d18a7a0e83a0754135ad764722fbab44f4bf
|
# the original imported the unused Item; Student (used throughout below) is
# assumed to live in a sibling module of the same package
from .Student import Student
class StudentClass:
def __init__(self):
self.__student_list = []
def add_student(self, student: Student):
self.__student_list.append(student)
def remove_student(self, student_id):
for current_student in self.__student_list:
if current_student.id == student_id:
self.__student_list.remove(current_student)
def list_students(self):
for student in self.__student_list:
print ("Student ID: " + str(student.id) + " Student Name: " + student.name)
def get_count_of_students(self):
return len(self.__student_list)
    def get_average_gpa(self):
        # left unimplemented in the original; raising makes the stub explicit
        raise NotImplementedError("average GPA is not implemented yet")
math_101 = StudentClass()
s1 = Student(100, "Mary")
s2 = Student(200, "Bob")
s3 = Student(300, "Brendan")
math_101.add_student(s1)
math_101.add_student(s2)
math_101.add_student(s3)
math_101.remove_student(200)
math_101.list_students()
print("The number of students left in this class is " + str(math_101.get_count_of_students()))
|
[
"from .Item import Item\n\nclass StudentClass:\n\n def __init__(self):\n\n self.__student_list = []\n\n def add_student(self, student: Student):\n\n self.__student_list.append(student)\n\n def remove_student(self, student_id):\n\n for current_student in self.__student_list:\n\n if current_student.id == student_id:\n\n self.__student_list.remove(current_student)\n\n def list_students(self):\n\n for student in self.__student_list:\n print (\"Student ID: \" + str(student.id) + \" Student Name: \" + student.name)\n\n def get_count_of_students(self):\n\n return len(self.__student_list)\n\n def get_average_gpa(self):\n implement_code_here = 0\n\n\n\nmath_101 = StudentClass()\n\ns1 = Student(100, \"Mary\")\ns2 = Student(200, \"Bob\")\ns3 = Student(300, \"Brendan\")\n\nmath_101.add_student(s1)\nmath_101.add_student(s2)\nmath_101.add_student(s3)\nmath_101.remove_student(200)\n\nmath_101.list_students()\n\nprint(\"The number of students left in this class is \" + str(math_101.get_count_of_students()))\n",
"from .Item import Item\n\n\nclass StudentClass:\n\n def __init__(self):\n self.__student_list = []\n\n def add_student(self, student: Student):\n self.__student_list.append(student)\n\n def remove_student(self, student_id):\n for current_student in self.__student_list:\n if current_student.id == student_id:\n self.__student_list.remove(current_student)\n\n def list_students(self):\n for student in self.__student_list:\n print('Student ID: ' + str(student.id) + ' Student Name: ' +\n student.name)\n\n def get_count_of_students(self):\n return len(self.__student_list)\n\n def get_average_gpa(self):\n implement_code_here = 0\n\n\nmath_101 = StudentClass()\ns1 = Student(100, 'Mary')\ns2 = Student(200, 'Bob')\ns3 = Student(300, 'Brendan')\nmath_101.add_student(s1)\nmath_101.add_student(s2)\nmath_101.add_student(s3)\nmath_101.remove_student(200)\nmath_101.list_students()\nprint('The number of students left in this class is ' + str(math_101.\n get_count_of_students()))\n",
"<import token>\n\n\nclass StudentClass:\n\n def __init__(self):\n self.__student_list = []\n\n def add_student(self, student: Student):\n self.__student_list.append(student)\n\n def remove_student(self, student_id):\n for current_student in self.__student_list:\n if current_student.id == student_id:\n self.__student_list.remove(current_student)\n\n def list_students(self):\n for student in self.__student_list:\n print('Student ID: ' + str(student.id) + ' Student Name: ' +\n student.name)\n\n def get_count_of_students(self):\n return len(self.__student_list)\n\n def get_average_gpa(self):\n implement_code_here = 0\n\n\nmath_101 = StudentClass()\ns1 = Student(100, 'Mary')\ns2 = Student(200, 'Bob')\ns3 = Student(300, 'Brendan')\nmath_101.add_student(s1)\nmath_101.add_student(s2)\nmath_101.add_student(s3)\nmath_101.remove_student(200)\nmath_101.list_students()\nprint('The number of students left in this class is ' + str(math_101.\n get_count_of_students()))\n",
"<import token>\n\n\nclass StudentClass:\n\n def __init__(self):\n self.__student_list = []\n\n def add_student(self, student: Student):\n self.__student_list.append(student)\n\n def remove_student(self, student_id):\n for current_student in self.__student_list:\n if current_student.id == student_id:\n self.__student_list.remove(current_student)\n\n def list_students(self):\n for student in self.__student_list:\n print('Student ID: ' + str(student.id) + ' Student Name: ' +\n student.name)\n\n def get_count_of_students(self):\n return len(self.__student_list)\n\n def get_average_gpa(self):\n implement_code_here = 0\n\n\n<assignment token>\nmath_101.add_student(s1)\nmath_101.add_student(s2)\nmath_101.add_student(s3)\nmath_101.remove_student(200)\nmath_101.list_students()\nprint('The number of students left in this class is ' + str(math_101.\n get_count_of_students()))\n",
"<import token>\n\n\nclass StudentClass:\n\n def __init__(self):\n self.__student_list = []\n\n def add_student(self, student: Student):\n self.__student_list.append(student)\n\n def remove_student(self, student_id):\n for current_student in self.__student_list:\n if current_student.id == student_id:\n self.__student_list.remove(current_student)\n\n def list_students(self):\n for student in self.__student_list:\n print('Student ID: ' + str(student.id) + ' Student Name: ' +\n student.name)\n\n def get_count_of_students(self):\n return len(self.__student_list)\n\n def get_average_gpa(self):\n implement_code_here = 0\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass StudentClass:\n <function token>\n\n def add_student(self, student: Student):\n self.__student_list.append(student)\n\n def remove_student(self, student_id):\n for current_student in self.__student_list:\n if current_student.id == student_id:\n self.__student_list.remove(current_student)\n\n def list_students(self):\n for student in self.__student_list:\n print('Student ID: ' + str(student.id) + ' Student Name: ' +\n student.name)\n\n def get_count_of_students(self):\n return len(self.__student_list)\n\n def get_average_gpa(self):\n implement_code_here = 0\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass StudentClass:\n <function token>\n\n def add_student(self, student: Student):\n self.__student_list.append(student)\n\n def remove_student(self, student_id):\n for current_student in self.__student_list:\n if current_student.id == student_id:\n self.__student_list.remove(current_student)\n\n def list_students(self):\n for student in self.__student_list:\n print('Student ID: ' + str(student.id) + ' Student Name: ' +\n student.name)\n <function token>\n\n def get_average_gpa(self):\n implement_code_here = 0\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass StudentClass:\n <function token>\n\n def add_student(self, student: Student):\n self.__student_list.append(student)\n\n def remove_student(self, student_id):\n for current_student in self.__student_list:\n if current_student.id == student_id:\n self.__student_list.remove(current_student)\n\n def list_students(self):\n for student in self.__student_list:\n print('Student ID: ' + str(student.id) + ' Student Name: ' +\n student.name)\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass StudentClass:\n <function token>\n <function token>\n\n def remove_student(self, student_id):\n for current_student in self.__student_list:\n if current_student.id == student_id:\n self.__student_list.remove(current_student)\n\n def list_students(self):\n for student in self.__student_list:\n print('Student ID: ' + str(student.id) + ' Student Name: ' +\n student.name)\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass StudentClass:\n <function token>\n <function token>\n <function token>\n\n def list_students(self):\n for student in self.__student_list:\n print('Student ID: ' + str(student.id) + ' Student Name: ' +\n student.name)\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass StudentClass:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<class token>\n<assignment token>\n<code token>\n"
] | false |
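The record above constructs Student(100, "Mary") and reads student.id and student.name without ever defining the class; a minimal stand-in reconstructed from that usage — the class body is an assumption, not part of the original module:

class Student:
    # only the attributes StudentClass actually touches
    def __init__(self, id, name):
        self.id = id
        self.name = name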
99,594 |
0007d2b1e099615f2a9544fa709fcc664c089363
|
# modusite
# Copyright (c) 2006-2010 Phil Christensen
# http://modu.bubblehouse.org
#
#
import urllib
from modu.editable import define
from modu.util import form, tags
from modu.persist import sql
from modusite.model import release
class ReleaseListField(define.definition):
"""
Display a list of releases for the current project.
"""
def get_element(self, req, style, storable):
frm = form.FormNode(self.name)
project_id = storable.get_id()
if not(project_id):
frm['release'](
type = 'label',
value = "You must save this project before adding a new release.",
)
return frm
req.store.ensure_factory('release', model_class=release.Release)
releases = req.store.load('release', project_id=project_id, __order_by='version_weight DESC') or []
query = {
'__init__[project_id]' : storable.get_id(),
'__init__[license_name]' : storable.license_name,
'__init__[license_url]' : storable.license_url,
'__init__[installation_url]' : storable.installation_url,
'__init__[changelog_url]' : storable.changelog_url,
}
new_release_url = req.get_path(req.prepath, 'detail/release/new?' + urllib.urlencode(query))
if not(releases):
if(style == 'listing'):
frm['release'](
type = 'label',
value = "(no releases)",
)
else:
frm['release'](
type = 'label',
value = "This project has no releases yet. " +
tags.a(href=new_release_url)['Click here to create one.'],
)
return frm
for r in releases:
release_url = req.get_path(req.prepath, 'detail/release', r.get_id())
frm['release'][r.get_id()](
prefix = '<span class="releases">',
suffix = '</span>',
)
frm['release'][r.get_id()]['version_string'](
type = 'label',
value = tags.a(href=release_url)[r.version_string],
)
if(style != 'listing'):
frm['release']['new'](
type = 'markup',
prefix = '<div>',
value = tags.a(href=new_release_url)['Create New Release'],
suffix = '</div>',
)
return frm
def update_storable(self, req, form, storable):
"""
No operation.
@see: L{modu.editable.define.definition.update_storable()}
"""
pass
|
[
"# modusite\n# Copyright (c) 2006-2010 Phil Christensen\n# http://modu.bubblehouse.org\n#\n#\n\nimport urllib\n\nfrom modu.editable import define\nfrom modu.util import form, tags\nfrom modu.persist import sql\n\nfrom modusite.model import release\n\nclass ReleaseListField(define.definition):\n\t\"\"\"\n\tDisplay a list of releases for the current project.\n\t\"\"\"\n\tdef get_element(self, req, style, storable):\n\t\tfrm = form.FormNode(self.name)\n\t\tproject_id = storable.get_id()\n\t\t\n\t\tif not(project_id):\n\t\t\tfrm['release'](\n\t\t\t\ttype\t= 'label',\n\t\t\t\tvalue\t= \"You must save this project before adding a new release.\",\n\t\t\t)\n\t\t\treturn frm\n\t\t\n\t\treq.store.ensure_factory('release', model_class=release.Release)\n\t\t\n\t\treleases = req.store.load('release', project_id=project_id, __order_by='version_weight DESC') or []\n\t\t\n\t\tquery = {\n\t\t\t'__init__[project_id]'\t\t\t: storable.get_id(),\n\t\t\t'__init__[license_name]'\t\t: storable.license_name,\n\t\t\t'__init__[license_url]'\t\t\t: storable.license_url,\n\t\t\t'__init__[installation_url]'\t: storable.installation_url,\n\t\t\t'__init__[changelog_url]'\t\t: storable.changelog_url,\n\t\t}\n\t\t\n\t\tnew_release_url = req.get_path(req.prepath, 'detail/release/new?' + urllib.urlencode(query))\n\t\t\n\t\tif not(releases):\n\t\t\tif(style == 'listing'):\n\t\t\t\tfrm['release'](\n\t\t\t\t\ttype\t= 'label',\n\t\t\t\t\tvalue\t= \"(no releases)\",\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tfrm['release'](\n\t\t\t\t\ttype\t= 'label',\n\t\t\t\t\tvalue\t= \"This project has no releases yet. \" +\n\t\t\t\t\t\t\t\ttags.a(href=new_release_url)['Click here to create one.'],\n\t\t\t\t)\n\t\t\treturn frm\n\t\t\n\t\tfor r in releases:\n\t\t\trelease_url = req.get_path(req.prepath, 'detail/release', r.get_id())\n\t\t\tfrm['release'][r.get_id()](\n\t\t\t\tprefix = '<span class=\"releases\">',\n\t\t\t\tsuffix = '</span>',\n\t\t\t)\n\t\t\tfrm['release'][r.get_id()]['version_string'](\n\t\t\t\ttype \t= 'label',\n\t\t\t\tvalue\t= tags.a(href=release_url)[r.version_string],\n\t\t\t)\n\t\t\n\t\tif(style != 'listing'):\n\t\t\tfrm['release']['new'](\n\t\t\t\ttype\t= 'markup',\n\t\t\t\tprefix\t= '<div>',\n\t\t\t\tvalue\t= tags.a(href=new_release_url)['Create New Release'],\n\t\t\t\tsuffix\t= '</div>',\n\t\t\t)\n\t\t\n\t\treturn frm\n\t\n\tdef update_storable(self, req, form, storable):\n\t\t\"\"\"\n\t\tNo operation.\n\t\t\n\t\t@see: L{modu.editable.define.definition.update_storable()}\n\t\t\"\"\"\n\t\tpass",
"import urllib\nfrom modu.editable import define\nfrom modu.util import form, tags\nfrom modu.persist import sql\nfrom modusite.model import release\n\n\nclass ReleaseListField(define.definition):\n \"\"\"\n\tDisplay a list of releases for the current project.\n\t\"\"\"\n\n def get_element(self, req, style, storable):\n frm = form.FormNode(self.name)\n project_id = storable.get_id()\n if not project_id:\n frm['release'](type='label', value=\n 'You must save this project before adding a new release.')\n return frm\n req.store.ensure_factory('release', model_class=release.Release)\n releases = req.store.load('release', project_id=project_id,\n __order_by='version_weight DESC') or []\n query = {'__init__[project_id]': storable.get_id(),\n '__init__[license_name]': storable.license_name,\n '__init__[license_url]': storable.license_url,\n '__init__[installation_url]': storable.installation_url,\n '__init__[changelog_url]': storable.changelog_url}\n new_release_url = req.get_path(req.prepath, 'detail/release/new?' +\n urllib.urlencode(query))\n if not releases:\n if style == 'listing':\n frm['release'](type='label', value='(no releases)')\n else:\n frm['release'](type='label', value=\n 'This project has no releases yet. ' + tags.a(href=\n new_release_url)['Click here to create one.'])\n return frm\n for r in releases:\n release_url = req.get_path(req.prepath, 'detail/release', r.\n get_id())\n frm['release'][r.get_id()](prefix='<span class=\"releases\">',\n suffix='</span>')\n frm['release'][r.get_id()]['version_string'](type='label',\n value=tags.a(href=release_url)[r.version_string])\n if style != 'listing':\n frm['release']['new'](type='markup', prefix='<div>', value=tags\n .a(href=new_release_url)['Create New Release'], suffix='</div>'\n )\n return frm\n\n def update_storable(self, req, form, storable):\n \"\"\"\n\t\tNo operation.\n\t\t\n\t\t@see: L{modu.editable.define.definition.update_storable()}\n\t\t\"\"\"\n pass\n",
"<import token>\n\n\nclass ReleaseListField(define.definition):\n \"\"\"\n\tDisplay a list of releases for the current project.\n\t\"\"\"\n\n def get_element(self, req, style, storable):\n frm = form.FormNode(self.name)\n project_id = storable.get_id()\n if not project_id:\n frm['release'](type='label', value=\n 'You must save this project before adding a new release.')\n return frm\n req.store.ensure_factory('release', model_class=release.Release)\n releases = req.store.load('release', project_id=project_id,\n __order_by='version_weight DESC') or []\n query = {'__init__[project_id]': storable.get_id(),\n '__init__[license_name]': storable.license_name,\n '__init__[license_url]': storable.license_url,\n '__init__[installation_url]': storable.installation_url,\n '__init__[changelog_url]': storable.changelog_url}\n new_release_url = req.get_path(req.prepath, 'detail/release/new?' +\n urllib.urlencode(query))\n if not releases:\n if style == 'listing':\n frm['release'](type='label', value='(no releases)')\n else:\n frm['release'](type='label', value=\n 'This project has no releases yet. ' + tags.a(href=\n new_release_url)['Click here to create one.'])\n return frm\n for r in releases:\n release_url = req.get_path(req.prepath, 'detail/release', r.\n get_id())\n frm['release'][r.get_id()](prefix='<span class=\"releases\">',\n suffix='</span>')\n frm['release'][r.get_id()]['version_string'](type='label',\n value=tags.a(href=release_url)[r.version_string])\n if style != 'listing':\n frm['release']['new'](type='markup', prefix='<div>', value=tags\n .a(href=new_release_url)['Create New Release'], suffix='</div>'\n )\n return frm\n\n def update_storable(self, req, form, storable):\n \"\"\"\n\t\tNo operation.\n\t\t\n\t\t@see: L{modu.editable.define.definition.update_storable()}\n\t\t\"\"\"\n pass\n",
"<import token>\n\n\nclass ReleaseListField(define.definition):\n <docstring token>\n\n def get_element(self, req, style, storable):\n frm = form.FormNode(self.name)\n project_id = storable.get_id()\n if not project_id:\n frm['release'](type='label', value=\n 'You must save this project before adding a new release.')\n return frm\n req.store.ensure_factory('release', model_class=release.Release)\n releases = req.store.load('release', project_id=project_id,\n __order_by='version_weight DESC') or []\n query = {'__init__[project_id]': storable.get_id(),\n '__init__[license_name]': storable.license_name,\n '__init__[license_url]': storable.license_url,\n '__init__[installation_url]': storable.installation_url,\n '__init__[changelog_url]': storable.changelog_url}\n new_release_url = req.get_path(req.prepath, 'detail/release/new?' +\n urllib.urlencode(query))\n if not releases:\n if style == 'listing':\n frm['release'](type='label', value='(no releases)')\n else:\n frm['release'](type='label', value=\n 'This project has no releases yet. ' + tags.a(href=\n new_release_url)['Click here to create one.'])\n return frm\n for r in releases:\n release_url = req.get_path(req.prepath, 'detail/release', r.\n get_id())\n frm['release'][r.get_id()](prefix='<span class=\"releases\">',\n suffix='</span>')\n frm['release'][r.get_id()]['version_string'](type='label',\n value=tags.a(href=release_url)[r.version_string])\n if style != 'listing':\n frm['release']['new'](type='markup', prefix='<div>', value=tags\n .a(href=new_release_url)['Create New Release'], suffix='</div>'\n )\n return frm\n\n def update_storable(self, req, form, storable):\n \"\"\"\n\t\tNo operation.\n\t\t\n\t\t@see: L{modu.editable.define.definition.update_storable()}\n\t\t\"\"\"\n pass\n",
"<import token>\n\n\nclass ReleaseListField(define.definition):\n <docstring token>\n\n def get_element(self, req, style, storable):\n frm = form.FormNode(self.name)\n project_id = storable.get_id()\n if not project_id:\n frm['release'](type='label', value=\n 'You must save this project before adding a new release.')\n return frm\n req.store.ensure_factory('release', model_class=release.Release)\n releases = req.store.load('release', project_id=project_id,\n __order_by='version_weight DESC') or []\n query = {'__init__[project_id]': storable.get_id(),\n '__init__[license_name]': storable.license_name,\n '__init__[license_url]': storable.license_url,\n '__init__[installation_url]': storable.installation_url,\n '__init__[changelog_url]': storable.changelog_url}\n new_release_url = req.get_path(req.prepath, 'detail/release/new?' +\n urllib.urlencode(query))\n if not releases:\n if style == 'listing':\n frm['release'](type='label', value='(no releases)')\n else:\n frm['release'](type='label', value=\n 'This project has no releases yet. ' + tags.a(href=\n new_release_url)['Click here to create one.'])\n return frm\n for r in releases:\n release_url = req.get_path(req.prepath, 'detail/release', r.\n get_id())\n frm['release'][r.get_id()](prefix='<span class=\"releases\">',\n suffix='</span>')\n frm['release'][r.get_id()]['version_string'](type='label',\n value=tags.a(href=release_url)[r.version_string])\n if style != 'listing':\n frm['release']['new'](type='markup', prefix='<div>', value=tags\n .a(href=new_release_url)['Create New Release'], suffix='</div>'\n )\n return frm\n <function token>\n",
"<import token>\n\n\nclass ReleaseListField(define.definition):\n <docstring token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
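The new-release URL in get_element is built by percent-encoding the '__init__[...]' keys with urllib.urlencode (Python 2 stdlib); a standalone sketch of just that step, using hypothetical stand-in values for the storable's fields:

import urllib

query = {'__init__[project_id]': 42, '__init__[license_name]': 'MIT'}
print('detail/release/new?' + urllib.urlencode(query))
# e.g. detail/release/new?__init__%5Bproject_id%5D=42&__init__%5Blicense_name%5D=MIT
# (key order follows dict iteration order and may vary)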
99,595 |
291ea7713c3074e2eeaa6ba9c0e9098f79b7674e
|
import math
import demoLibrary
#scope function
def sampleFunction(x,y,z):
outOfScope = 10
return (x+y+z)
pass
def cheese_and_crackers(cheese_count, boxes_of_crackers):
print("You have" + str(cheese_count) + "Cheeses!")
print "You have %d cheeses!" % cheese_count
print "You have %d boxes of crackers!" % boxes_of_crackers
print "Man that's enough for a party!"
print "Get a blanket.\n"
#name space main
def main():
cheese = input("how much cheese do you want: ")
cracker = input("# crackers?: ")
cheese_and_crackers(cheese, cracker)
main()
#scope global
|
[
"import math\nimport demoLibrary\n\n\n#scope function\ndef sampleFunction(x,y,z):\n outOfScope = 10\n return (x+y+z)\n pass\n\n\n\n\n\ndef cheese_and_crackers(cheese_count, boxes_of_crackers):\n print(\"You have\" + str(cheese_count) + \"Cheeses!\")\n print \"You have %d cheeses!\" % cheese_count\n print \"You have %d boxes of crackers!\" % boxes_of_crackers\n print \"Man that's enough for a party!\"\n print \"Get a blanket.\\n\"\n\n\n\n\n#name space main\ndef main():\n cheese = input(\"how much cheese do you want: \")\n cracker = input(\"# crackers?: \")\n cheese_and_crackers(cheese, cracker)\n \n \n \nmain()\n#scope global\n"
] | true |
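This record is flagged error: true because it mixes Python 3 print() calls with Python 2 print statements, so no single interpreter parses it. A corrected Python 3 sketch of the same program, kept separate so the record's code still matches its label (the unused math/demoLibrary imports and sampleFunction are dropped):

def cheese_and_crackers(cheese_count, boxes_of_crackers):
    print("You have %d cheeses!" % cheese_count)
    print("You have %d boxes of crackers!" % boxes_of_crackers)
    print("Man that's enough for a party!")
    print("Get a blanket.\n")

def main():
    cheese = int(input("how much cheese do you want: "))
    cracker = int(input("# crackers?: "))
    cheese_and_crackers(cheese, cracker)

main()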
99,596 |
55085335c32483f6a92b637d114b4b4d08288519
|
def main():
n, m = map(int, input().split())
As = list(map(int, input().split()))
if n >= sum(As):
ans = n - sum(As)
else:
ans = -1
print(ans)
if __name__ == "__main__":
main()
|
[
"def main():\n n, m = map(int, input().split())\n As = list(map(int, input().split()))\n if n >= sum(As):\n ans = n - sum(As)\n else:\n ans = -1\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()\n",
"def main():\n n, m = map(int, input().split())\n As = list(map(int, input().split()))\n if n >= sum(As):\n ans = n - sum(As)\n else:\n ans = -1\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n",
"def main():\n n, m = map(int, input().split())\n As = list(map(int, input().split()))\n if n >= sum(As):\n ans = n - sum(As)\n else:\n ans = -1\n print(ans)\n\n\n<code token>\n",
"<function token>\n<code token>\n"
] | false |
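A quick check of the record above, feeding a hypothetical test case through main (it prints n - sum(As) when the assignments fit, otherwise -1); this assumes main from the record is in scope:

import io, sys

sys.stdin = io.StringIO("10 3\n2 3 4\n")
main()  # prints 1, i.e. 10 - (2 + 3 + 4); with "5 2" / "3 4" it would print -1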
99,597 |
2baa6df8f3d2acf39afced6fe48181bac41a636d
|
"""
Created on 2021/07/12
@author Sangwoo Han
"""
import os
import pickle
import time
from datetime import timedelta
from typing import Dict, Optional, Tuple, Union
import numpy as np
import torch
from logzero import logger
from m2m_text.datasets.text import TextDataset
from scipy.sparse import csr_matrix
from torch.utils.data import Dataset
from transformers import AutoTokenizer
class BertDataset(Dataset):
"""Warpping Dataset class for BERT"""
def __init__(
self,
dataset: TextDataset,
model_name: str,
verbose: bool = True,
**tokenizer_kwargs,
) -> None:
self.dataset = dataset
self.verbose = verbose
self.train = self.dataset.train
self.npz_path = "train_" if self.train else "test_"
self.npz_path += model_name.replace("-", "_")
self.npz_path += f"_{tokenizer_kwargs['max_length']}L.npz"
self.npz_path = os.path.join(self.dataset.data_dir, self.npz_path)
self.input_ids, self.attention_mask = self._load_data(
model_name, **tokenizer_kwargs
)
def _load_data(
self, model_name: str, **tokenizer_kwargs
) -> Tuple[torch.Tensor, torch.Tensor]:
if os.path.isfile(self.npz_path):
with np.load(self.npz_path) as npz:
return torch.from_numpy(npz["input_ids"]), torch.from_numpy(
npz["attention_mask"]
)
with open(self.dataset.tokenized_path, "rb") as f:
texts = pickle.load(f)
os.environ["TOKENIZERS_PARALLELISM"] = "true"
tokenizer = AutoTokenizer.from_pretrained(model_name)
if self.verbose:
logger.info("Tokenize...")
start = time.time()
inputs = tokenizer([" ".join(s) for s in texts], **tokenizer_kwargs)
if self.verbose:
elapsed = time.time() - start
logger.info(
f"Finish Tokenization. {elapsed:.2f}s {timedelta(seconds=elapsed)}"
)
np.savez(
self.npz_path,
input_ids=inputs["input_ids"].numpy(),
attention_mask=inputs["attention_mask"].numpy(),
)
return inputs["input_ids"], inputs["attention_mask"]
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
return (
self.input_ids[idx],
self.attention_mask[idx],
torch.from_numpy(self.y[idx].toarray().squeeze()).float(),
)
@property
def raw_y(self) -> np.ndarray:
return self.dataset.raw_y
@property
def y(self) -> csr_matrix:
return self.dataset.y
class SBertDataset(Dataset):
"""Warpping Dataset class for senteice BERT"""
def __init__(
self,
inputs: Dict[str, torch.Tensor],
labels: Optional[csr_matrix],
train: bool = True,
) -> None:
self.inputs = inputs
self.labels = labels
self.is_train = train
if train and labels is None:
raise ValueError("labels should be set when is_train is true")
def __len__(self) -> int:
return self.inputs["input_ids"].shape[0]
def __getitem__(
self, idx: int
) -> Union[
Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
]:
if self.is_train:
return (
idx,
self.inputs["input_ids"][idx],
self.inputs["attention_mask"][idx],
torch.from_numpy(self.labels[idx].toarray().squeeze()),
)
else:
return (
idx,
self.inputs["input_ids"][idx],
self.inputs["attention_mask"][idx],
)
def collate_fn(batch):
if len(batch[0]) == 4:
return (
torch.LongTensor([b[0] for b in batch]),
{
"input_ids": torch.stack([b[1] for b in batch]),
"attention_mask": torch.stack([b[2] for b in batch]),
},
torch.stack([b[3] for b in batch]),
)
else:
return (
torch.LongTensor([b[0] for b in batch]),
{
"input_ids": torch.stack([b[1] for b in batch]),
"attention_mask": torch.stack([b[2] for b in batch]),
},
)
def collate_fn2(batch):
return (
{
"input_ids": torch.stack([b[0] for b in batch]),
"attention_mask": torch.stack([b[1] for b in batch]),
},
torch.stack([b[2] for b in batch]),
)
|
[
"\"\"\"\nCreated on 2021/07/12\n@author Sangwoo Han\n\"\"\"\nimport os\nimport pickle\nimport time\nfrom datetime import timedelta\nfrom typing import Dict, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom logzero import logger\nfrom m2m_text.datasets.text import TextDataset\nfrom scipy.sparse import csr_matrix\nfrom torch.utils.data import Dataset\nfrom transformers import AutoTokenizer\n\n\nclass BertDataset(Dataset):\n \"\"\"Warpping Dataset class for BERT\"\"\"\n\n def __init__(\n self,\n dataset: TextDataset,\n model_name: str,\n verbose: bool = True,\n **tokenizer_kwargs,\n ) -> None:\n self.dataset = dataset\n self.verbose = verbose\n\n self.train = self.dataset.train\n\n self.npz_path = \"train_\" if self.train else \"test_\"\n self.npz_path += model_name.replace(\"-\", \"_\")\n self.npz_path += f\"_{tokenizer_kwargs['max_length']}L.npz\"\n self.npz_path = os.path.join(self.dataset.data_dir, self.npz_path)\n\n self.input_ids, self.attention_mask = self._load_data(\n model_name, **tokenizer_kwargs\n )\n\n def _load_data(\n self, model_name: str, **tokenizer_kwargs\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n if os.path.isfile(self.npz_path):\n with np.load(self.npz_path) as npz:\n return torch.from_numpy(npz[\"input_ids\"]), torch.from_numpy(\n npz[\"attention_mask\"]\n )\n\n with open(self.dataset.tokenized_path, \"rb\") as f:\n texts = pickle.load(f)\n\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"true\"\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n\n if self.verbose:\n logger.info(\"Tokenize...\")\n start = time.time()\n\n inputs = tokenizer([\" \".join(s) for s in texts], **tokenizer_kwargs)\n\n if self.verbose:\n elapsed = time.time() - start\n logger.info(\n f\"Finish Tokenization. {elapsed:.2f}s {timedelta(seconds=elapsed)}\"\n )\n\n np.savez(\n self.npz_path,\n input_ids=inputs[\"input_ids\"].numpy(),\n attention_mask=inputs[\"attention_mask\"].numpy(),\n )\n\n return inputs[\"input_ids\"], inputs[\"attention_mask\"]\n\n def __len__(self) -> int:\n return len(self.dataset)\n\n def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n return (\n self.input_ids[idx],\n self.attention_mask[idx],\n torch.from_numpy(self.y[idx].toarray().squeeze()).float(),\n )\n\n @property\n def raw_y(self) -> np.ndarray:\n return self.dataset.raw_y\n\n @property\n def y(self) -> csr_matrix:\n return self.dataset.y\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(\n self,\n inputs: Dict[str, torch.Tensor],\n labels: Optional[csr_matrix],\n train: bool = True,\n ) -> None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n\n if train and labels is None:\n raise ValueError(\"labels should be set when is_train is true\")\n\n def __len__(self) -> int:\n return self.inputs[\"input_ids\"].shape[0]\n\n def __getitem__(\n self, idx: int\n ) -> Union[\n Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],\n Tuple[torch.Tensor, torch.Tensor, torch.Tensor],\n ]:\n if self.is_train:\n return (\n idx,\n self.inputs[\"input_ids\"][idx],\n self.inputs[\"attention_mask\"][idx],\n torch.from_numpy(self.labels[idx].toarray().squeeze()),\n )\n else:\n return (\n idx,\n self.inputs[\"input_ids\"][idx],\n self.inputs[\"attention_mask\"][idx],\n )\n\n\ndef collate_fn(batch):\n if len(batch[0]) == 4:\n return (\n torch.LongTensor([b[0] for b in batch]),\n {\n \"input_ids\": torch.stack([b[1] for b in batch]),\n \"attention_mask\": torch.stack([b[2] for b in batch]),\n },\n 
torch.stack([b[3] for b in batch]),\n )\n else:\n return (\n torch.LongTensor([b[0] for b in batch]),\n {\n \"input_ids\": torch.stack([b[1] for b in batch]),\n \"attention_mask\": torch.stack([b[2] for b in batch]),\n },\n )\n\n\ndef collate_fn2(batch):\n return (\n {\n \"input_ids\": torch.stack([b[0] for b in batch]),\n \"attention_mask\": torch.stack([b[1] for b in batch]),\n },\n torch.stack([b[2] for b in batch]),\n )\n",
"<docstring token>\nimport os\nimport pickle\nimport time\nfrom datetime import timedelta\nfrom typing import Dict, Optional, Tuple, Union\nimport numpy as np\nimport torch\nfrom logzero import logger\nfrom m2m_text.datasets.text import TextDataset\nfrom scipy.sparse import csr_matrix\nfrom torch.utils.data import Dataset\nfrom transformers import AutoTokenizer\n\n\nclass BertDataset(Dataset):\n \"\"\"Warpping Dataset class for BERT\"\"\"\n\n def __init__(self, dataset: TextDataset, model_name: str, verbose: bool\n =True, **tokenizer_kwargs) ->None:\n self.dataset = dataset\n self.verbose = verbose\n self.train = self.dataset.train\n self.npz_path = 'train_' if self.train else 'test_'\n self.npz_path += model_name.replace('-', '_')\n self.npz_path += f\"_{tokenizer_kwargs['max_length']}L.npz\"\n self.npz_path = os.path.join(self.dataset.data_dir, self.npz_path)\n self.input_ids, self.attention_mask = self._load_data(model_name,\n **tokenizer_kwargs)\n\n def _load_data(self, model_name: str, **tokenizer_kwargs) ->Tuple[torch\n .Tensor, torch.Tensor]:\n if os.path.isfile(self.npz_path):\n with np.load(self.npz_path) as npz:\n return torch.from_numpy(npz['input_ids']), torch.from_numpy(npz\n ['attention_mask'])\n with open(self.dataset.tokenized_path, 'rb') as f:\n texts = pickle.load(f)\n os.environ['TOKENIZERS_PARALLELISM'] = 'true'\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n if self.verbose:\n logger.info('Tokenize...')\n start = time.time()\n inputs = tokenizer([' '.join(s) for s in texts], **tokenizer_kwargs)\n if self.verbose:\n elapsed = time.time() - start\n logger.info(\n f'Finish Tokenization. {elapsed:.2f}s {timedelta(seconds=elapsed)}'\n )\n np.savez(self.npz_path, input_ids=inputs['input_ids'].numpy(),\n attention_mask=inputs['attention_mask'].numpy())\n return inputs['input_ids'], inputs['attention_mask']\n\n def __len__(self) ->int:\n return len(self.dataset)\n\n def __getitem__(self, idx: int) ->Tuple[torch.Tensor, torch.Tensor,\n torch.Tensor]:\n return self.input_ids[idx], self.attention_mask[idx], torch.from_numpy(\n self.y[idx].toarray().squeeze()).float()\n\n @property\n def raw_y(self) ->np.ndarray:\n return self.dataset.raw_y\n\n @property\n def y(self) ->csr_matrix:\n return self.dataset.y\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\ndef collate_fn(batch):\n if len(batch[0]) == 4:\n return torch.LongTensor([b[0] for b in batch]), {'input_ids': torch\n .stack([b[1] for b in batch]), 'attention_mask': torch.stack([b\n [2] for b in batch])}, torch.stack([b[3] for b in batch])\n else:\n return torch.LongTensor([b[0] for b in batch]), {'input_ids': torch\n .stack([b[1] for b in batch]), 'attention_mask': torch.stack([b\n [2] for b in batch])}\n\n\ndef 
collate_fn2(batch):\n return {'input_ids': torch.stack([b[0] for b in batch]),\n 'attention_mask': torch.stack([b[1] for b in batch])}, torch.stack([\n b[2] for b in batch])\n",
"<docstring token>\n<import token>\n\n\nclass BertDataset(Dataset):\n \"\"\"Warpping Dataset class for BERT\"\"\"\n\n def __init__(self, dataset: TextDataset, model_name: str, verbose: bool\n =True, **tokenizer_kwargs) ->None:\n self.dataset = dataset\n self.verbose = verbose\n self.train = self.dataset.train\n self.npz_path = 'train_' if self.train else 'test_'\n self.npz_path += model_name.replace('-', '_')\n self.npz_path += f\"_{tokenizer_kwargs['max_length']}L.npz\"\n self.npz_path = os.path.join(self.dataset.data_dir, self.npz_path)\n self.input_ids, self.attention_mask = self._load_data(model_name,\n **tokenizer_kwargs)\n\n def _load_data(self, model_name: str, **tokenizer_kwargs) ->Tuple[torch\n .Tensor, torch.Tensor]:\n if os.path.isfile(self.npz_path):\n with np.load(self.npz_path) as npz:\n return torch.from_numpy(npz['input_ids']), torch.from_numpy(npz\n ['attention_mask'])\n with open(self.dataset.tokenized_path, 'rb') as f:\n texts = pickle.load(f)\n os.environ['TOKENIZERS_PARALLELISM'] = 'true'\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n if self.verbose:\n logger.info('Tokenize...')\n start = time.time()\n inputs = tokenizer([' '.join(s) for s in texts], **tokenizer_kwargs)\n if self.verbose:\n elapsed = time.time() - start\n logger.info(\n f'Finish Tokenization. {elapsed:.2f}s {timedelta(seconds=elapsed)}'\n )\n np.savez(self.npz_path, input_ids=inputs['input_ids'].numpy(),\n attention_mask=inputs['attention_mask'].numpy())\n return inputs['input_ids'], inputs['attention_mask']\n\n def __len__(self) ->int:\n return len(self.dataset)\n\n def __getitem__(self, idx: int) ->Tuple[torch.Tensor, torch.Tensor,\n torch.Tensor]:\n return self.input_ids[idx], self.attention_mask[idx], torch.from_numpy(\n self.y[idx].toarray().squeeze()).float()\n\n @property\n def raw_y(self) ->np.ndarray:\n return self.dataset.raw_y\n\n @property\n def y(self) ->csr_matrix:\n return self.dataset.y\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\ndef collate_fn(batch):\n if len(batch[0]) == 4:\n return torch.LongTensor([b[0] for b in batch]), {'input_ids': torch\n .stack([b[1] for b in batch]), 'attention_mask': torch.stack([b\n [2] for b in batch])}, torch.stack([b[3] for b in batch])\n else:\n return torch.LongTensor([b[0] for b in batch]), {'input_ids': torch\n .stack([b[1] for b in batch]), 'attention_mask': torch.stack([b\n [2] for b in batch])}\n\n\ndef collate_fn2(batch):\n return {'input_ids': torch.stack([b[0] for b in batch]),\n 'attention_mask': torch.stack([b[1] for b in batch])}, torch.stack([\n b[2] for b in batch])\n",
"<docstring token>\n<import token>\n\n\nclass BertDataset(Dataset):\n \"\"\"Warpping Dataset class for BERT\"\"\"\n\n def __init__(self, dataset: TextDataset, model_name: str, verbose: bool\n =True, **tokenizer_kwargs) ->None:\n self.dataset = dataset\n self.verbose = verbose\n self.train = self.dataset.train\n self.npz_path = 'train_' if self.train else 'test_'\n self.npz_path += model_name.replace('-', '_')\n self.npz_path += f\"_{tokenizer_kwargs['max_length']}L.npz\"\n self.npz_path = os.path.join(self.dataset.data_dir, self.npz_path)\n self.input_ids, self.attention_mask = self._load_data(model_name,\n **tokenizer_kwargs)\n\n def _load_data(self, model_name: str, **tokenizer_kwargs) ->Tuple[torch\n .Tensor, torch.Tensor]:\n if os.path.isfile(self.npz_path):\n with np.load(self.npz_path) as npz:\n return torch.from_numpy(npz['input_ids']), torch.from_numpy(npz\n ['attention_mask'])\n with open(self.dataset.tokenized_path, 'rb') as f:\n texts = pickle.load(f)\n os.environ['TOKENIZERS_PARALLELISM'] = 'true'\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n if self.verbose:\n logger.info('Tokenize...')\n start = time.time()\n inputs = tokenizer([' '.join(s) for s in texts], **tokenizer_kwargs)\n if self.verbose:\n elapsed = time.time() - start\n logger.info(\n f'Finish Tokenization. {elapsed:.2f}s {timedelta(seconds=elapsed)}'\n )\n np.savez(self.npz_path, input_ids=inputs['input_ids'].numpy(),\n attention_mask=inputs['attention_mask'].numpy())\n return inputs['input_ids'], inputs['attention_mask']\n\n def __len__(self) ->int:\n return len(self.dataset)\n\n def __getitem__(self, idx: int) ->Tuple[torch.Tensor, torch.Tensor,\n torch.Tensor]:\n return self.input_ids[idx], self.attention_mask[idx], torch.from_numpy(\n self.y[idx].toarray().squeeze()).float()\n\n @property\n def raw_y(self) ->np.ndarray:\n return self.dataset.raw_y\n\n @property\n def y(self) ->csr_matrix:\n return self.dataset.y\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\ndef collate_fn(batch):\n if len(batch[0]) == 4:\n return torch.LongTensor([b[0] for b in batch]), {'input_ids': torch\n .stack([b[1] for b in batch]), 'attention_mask': torch.stack([b\n [2] for b in batch])}, torch.stack([b[3] for b in batch])\n else:\n return torch.LongTensor([b[0] for b in batch]), {'input_ids': torch\n .stack([b[1] for b in batch]), 'attention_mask': torch.stack([b\n [2] for b in batch])}\n\n\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass BertDataset(Dataset):\n \"\"\"Warpping Dataset class for BERT\"\"\"\n\n def __init__(self, dataset: TextDataset, model_name: str, verbose: bool\n =True, **tokenizer_kwargs) ->None:\n self.dataset = dataset\n self.verbose = verbose\n self.train = self.dataset.train\n self.npz_path = 'train_' if self.train else 'test_'\n self.npz_path += model_name.replace('-', '_')\n self.npz_path += f\"_{tokenizer_kwargs['max_length']}L.npz\"\n self.npz_path = os.path.join(self.dataset.data_dir, self.npz_path)\n self.input_ids, self.attention_mask = self._load_data(model_name,\n **tokenizer_kwargs)\n\n def _load_data(self, model_name: str, **tokenizer_kwargs) ->Tuple[torch\n .Tensor, torch.Tensor]:\n if os.path.isfile(self.npz_path):\n with np.load(self.npz_path) as npz:\n return torch.from_numpy(npz['input_ids']), torch.from_numpy(npz\n ['attention_mask'])\n with open(self.dataset.tokenized_path, 'rb') as f:\n texts = pickle.load(f)\n os.environ['TOKENIZERS_PARALLELISM'] = 'true'\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n if self.verbose:\n logger.info('Tokenize...')\n start = time.time()\n inputs = tokenizer([' '.join(s) for s in texts], **tokenizer_kwargs)\n if self.verbose:\n elapsed = time.time() - start\n logger.info(\n f'Finish Tokenization. {elapsed:.2f}s {timedelta(seconds=elapsed)}'\n )\n np.savez(self.npz_path, input_ids=inputs['input_ids'].numpy(),\n attention_mask=inputs['attention_mask'].numpy())\n return inputs['input_ids'], inputs['attention_mask']\n\n def __len__(self) ->int:\n return len(self.dataset)\n\n def __getitem__(self, idx: int) ->Tuple[torch.Tensor, torch.Tensor,\n torch.Tensor]:\n return self.input_ids[idx], self.attention_mask[idx], torch.from_numpy(\n self.y[idx].toarray().squeeze()).float()\n\n @property\n def raw_y(self) ->np.ndarray:\n return self.dataset.raw_y\n\n @property\n def y(self) ->csr_matrix:\n return self.dataset.y\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass BertDataset(Dataset):\n <docstring token>\n\n def __init__(self, dataset: TextDataset, model_name: str, verbose: bool\n =True, **tokenizer_kwargs) ->None:\n self.dataset = dataset\n self.verbose = verbose\n self.train = self.dataset.train\n self.npz_path = 'train_' if self.train else 'test_'\n self.npz_path += model_name.replace('-', '_')\n self.npz_path += f\"_{tokenizer_kwargs['max_length']}L.npz\"\n self.npz_path = os.path.join(self.dataset.data_dir, self.npz_path)\n self.input_ids, self.attention_mask = self._load_data(model_name,\n **tokenizer_kwargs)\n\n def _load_data(self, model_name: str, **tokenizer_kwargs) ->Tuple[torch\n .Tensor, torch.Tensor]:\n if os.path.isfile(self.npz_path):\n with np.load(self.npz_path) as npz:\n return torch.from_numpy(npz['input_ids']), torch.from_numpy(npz\n ['attention_mask'])\n with open(self.dataset.tokenized_path, 'rb') as f:\n texts = pickle.load(f)\n os.environ['TOKENIZERS_PARALLELISM'] = 'true'\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n if self.verbose:\n logger.info('Tokenize...')\n start = time.time()\n inputs = tokenizer([' '.join(s) for s in texts], **tokenizer_kwargs)\n if self.verbose:\n elapsed = time.time() - start\n logger.info(\n f'Finish Tokenization. {elapsed:.2f}s {timedelta(seconds=elapsed)}'\n )\n np.savez(self.npz_path, input_ids=inputs['input_ids'].numpy(),\n attention_mask=inputs['attention_mask'].numpy())\n return inputs['input_ids'], inputs['attention_mask']\n\n def __len__(self) ->int:\n return len(self.dataset)\n\n def __getitem__(self, idx: int) ->Tuple[torch.Tensor, torch.Tensor,\n torch.Tensor]:\n return self.input_ids[idx], self.attention_mask[idx], torch.from_numpy(\n self.y[idx].toarray().squeeze()).float()\n\n @property\n def raw_y(self) ->np.ndarray:\n return self.dataset.raw_y\n\n @property\n def y(self) ->csr_matrix:\n return self.dataset.y\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass BertDataset(Dataset):\n <docstring token>\n\n def __init__(self, dataset: TextDataset, model_name: str, verbose: bool\n =True, **tokenizer_kwargs) ->None:\n self.dataset = dataset\n self.verbose = verbose\n self.train = self.dataset.train\n self.npz_path = 'train_' if self.train else 'test_'\n self.npz_path += model_name.replace('-', '_')\n self.npz_path += f\"_{tokenizer_kwargs['max_length']}L.npz\"\n self.npz_path = os.path.join(self.dataset.data_dir, self.npz_path)\n self.input_ids, self.attention_mask = self._load_data(model_name,\n **tokenizer_kwargs)\n <function token>\n\n def __len__(self) ->int:\n return len(self.dataset)\n\n def __getitem__(self, idx: int) ->Tuple[torch.Tensor, torch.Tensor,\n torch.Tensor]:\n return self.input_ids[idx], self.attention_mask[idx], torch.from_numpy(\n self.y[idx].toarray().squeeze()).float()\n\n @property\n def raw_y(self) ->np.ndarray:\n return self.dataset.raw_y\n\n @property\n def y(self) ->csr_matrix:\n return self.dataset.y\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass BertDataset(Dataset):\n <docstring token>\n <function token>\n <function token>\n\n def __len__(self) ->int:\n return len(self.dataset)\n\n def __getitem__(self, idx: int) ->Tuple[torch.Tensor, torch.Tensor,\n torch.Tensor]:\n return self.input_ids[idx], self.attention_mask[idx], torch.from_numpy(\n self.y[idx].toarray().squeeze()).float()\n\n @property\n def raw_y(self) ->np.ndarray:\n return self.dataset.raw_y\n\n @property\n def y(self) ->csr_matrix:\n return self.dataset.y\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass BertDataset(Dataset):\n <docstring token>\n <function token>\n <function token>\n\n def __len__(self) ->int:\n return len(self.dataset)\n\n def __getitem__(self, idx: int) ->Tuple[torch.Tensor, torch.Tensor,\n torch.Tensor]:\n return self.input_ids[idx], self.attention_mask[idx], torch.from_numpy(\n self.y[idx].toarray().squeeze()).float()\n\n @property\n def raw_y(self) ->np.ndarray:\n return self.dataset.raw_y\n <function token>\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass BertDataset(Dataset):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def __getitem__(self, idx: int) ->Tuple[torch.Tensor, torch.Tensor,\n torch.Tensor]:\n return self.input_ids[idx], self.attention_mask[idx], torch.from_numpy(\n self.y[idx].toarray().squeeze()).float()\n\n @property\n def raw_y(self) ->np.ndarray:\n return self.dataset.raw_y\n <function token>\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass BertDataset(Dataset):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def raw_y(self) ->np.ndarray:\n return self.dataset.raw_y\n <function token>\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\nclass BertDataset(Dataset):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass SBertDataset(Dataset):\n \"\"\"Warpping Dataset class for senteice BERT\"\"\"\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass SBertDataset(Dataset):\n <docstring token>\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n\n def __len__(self) ->int:\n return self.inputs['input_ids'].shape[0]\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass SBertDataset(Dataset):\n <docstring token>\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n <function token>\n\n def __getitem__(self, idx: int) ->Union[Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.\n Tensor, torch.Tensor]]:\n if self.is_train:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx], torch.from_numpy(self.labels[idx].\n toarray().squeeze())\n else:\n return idx, self.inputs['input_ids'][idx], self.inputs[\n 'attention_mask'][idx]\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass SBertDataset(Dataset):\n <docstring token>\n\n def __init__(self, inputs: Dict[str, torch.Tensor], labels: Optional[\n csr_matrix], train: bool=True) ->None:\n self.inputs = inputs\n self.labels = labels\n self.is_train = train\n if train and labels is None:\n raise ValueError('labels should be set when is_train is true')\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<class token>\n\n\nclass SBertDataset(Dataset):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<class token>\n<class token>\n<function token>\n<function token>\n"
] | false |
99,598 |
1c2bf1f77726938b9d39604725036a34ff2ba0b1
|
def caught_speeding(speed, is_birthday):
j = 0
if is_birthday:
j = 5
if speed <= 60 + j:
return 0
elif speed in range(61 + j, 81 + j):
return 1
else:
return 2
|
[
"def caught_speeding(speed, is_birthday):\n j = 0\n\n if is_birthday:\n j = 5\n\n if speed <= 60 + j:\n return 0\n elif speed in range(61 + j, 81 + j):\n return 1\n else:\n return 2",
"def caught_speeding(speed, is_birthday):\n j = 0\n if is_birthday:\n j = 5\n if speed <= 60 + j:\n return 0\n elif speed in range(61 + j, 81 + j):\n return 1\n else:\n return 2\n",
"<function token>\n"
] | false |
99,599 |
b94c54d1c0d2de6f402a72272296f5cf18088534
|
import os
datasetLst = [line.rstrip('\n') for line in open('NCTC_ds.txt')]
for i in range(len(datasetLst)):
if len(datasetLst[i])==0:
break
ds = datasetLst[i]
fldr = ds + "_filtered"
os.system("mkdir {}".format(fldr))
os.system("cp filtered_fasta/{}_reads.fasta {}/reads.fasta".format(ds,fldr))
os.system("cp groundTruths/{}_daligner_ground_truth.txt {}/daligner_ground_truth.txt".format(ds,fldr))
|
[
"import os\n\ndatasetLst = [line.rstrip('\\n') for line in open('NCTC_ds.txt')]\nfor i in range(len(datasetLst)):\n\tif len(datasetLst[i])==0:\n\t\tbreak\n\tds = datasetLst[i]\n\tfldr = ds + \"_filtered\"\n\n\tos.system(\"mkdir {}\".format(fldr))\n\tos.system(\"cp filtered_fasta/{}_reads.fasta {}/reads.fasta\".format(ds,fldr))\n\tos.system(\"cp groundTruths/{}_daligner_ground_truth.txt {}/daligner_ground_truth.txt\".format(ds,fldr))",
"import os\ndatasetLst = [line.rstrip('\\n') for line in open('NCTC_ds.txt')]\nfor i in range(len(datasetLst)):\n if len(datasetLst[i]) == 0:\n break\n ds = datasetLst[i]\n fldr = ds + '_filtered'\n os.system('mkdir {}'.format(fldr))\n os.system('cp filtered_fasta/{}_reads.fasta {}/reads.fasta'.format(ds,\n fldr))\n os.system(\n 'cp groundTruths/{}_daligner_ground_truth.txt {}/daligner_ground_truth.txt'\n .format(ds, fldr))\n",
"<import token>\ndatasetLst = [line.rstrip('\\n') for line in open('NCTC_ds.txt')]\nfor i in range(len(datasetLst)):\n if len(datasetLst[i]) == 0:\n break\n ds = datasetLst[i]\n fldr = ds + '_filtered'\n os.system('mkdir {}'.format(fldr))\n os.system('cp filtered_fasta/{}_reads.fasta {}/reads.fasta'.format(ds,\n fldr))\n os.system(\n 'cp groundTruths/{}_daligner_ground_truth.txt {}/daligner_ground_truth.txt'\n .format(ds, fldr))\n",
"<import token>\n<assignment token>\nfor i in range(len(datasetLst)):\n if len(datasetLst[i]) == 0:\n break\n ds = datasetLst[i]\n fldr = ds + '_filtered'\n os.system('mkdir {}'.format(fldr))\n os.system('cp filtered_fasta/{}_reads.fasta {}/reads.fasta'.format(ds,\n fldr))\n os.system(\n 'cp groundTruths/{}_daligner_ground_truth.txt {}/daligner_ground_truth.txt'\n .format(ds, fldr))\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |